Merge remote-tracking branch 'apache/master' into HDDS-2823
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 8fc3747..7c2f5db 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -193,6 +193,10 @@
       <artifactId>hadoop-hdds-interface-client</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-interface-admin</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.hamcrest</groupId>
       <artifactId>hamcrest-all</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index b08f0f7..aa082e8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds;
 
 import javax.management.ObjectName;
+import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -27,6 +28,7 @@
 import java.nio.file.Path;
 import java.util.Collection;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.OptionalInt;
@@ -39,6 +41,8 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
@@ -51,6 +55,11 @@
 import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -224,27 +233,50 @@
    */
   public static Collection<InetSocketAddress> getSCMAddresses(
       ConfigurationSource conf) {
-    Collection<String> names =
-        conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
-    if (names.isEmpty()) {
-      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
-          + " need to be a set of valid DNS names or IP addresses."
-          + " Empty address list found.");
-    }
 
-    Collection<InetSocketAddress> addresses = new HashSet<>(names.size());
-    for (String address : names) {
-      Optional<String> hostname = getHostName(address);
-      if (!hostname.isPresent()) {
-        throw new IllegalArgumentException("Invalid hostname for SCM: "
-            + address);
+    // First check HA style config, if not defined fall back to OZONE_SCM_NAMES
+
+    if (SCMHAUtils.getScmServiceId(conf) != null) {
+      List<SCMNodeInfo> scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf);
+      Collection<InetSocketAddress> scmAddressList =
+          new HashSet<>(scmNodeInfoList.size());
+      for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) {
+        scmAddressList.add(
+            NetUtils.createSocketAddr(scmNodeInfo.getScmDatanodeAddress()));
       }
-      int port = getHostPort(address)
-          .orElse(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT);
-      InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(), port);
-      addresses.add(addr);
+      return scmAddressList;
+    } else {
+      // fall back to OZONE_SCM_NAMES.
+      Collection<String> names =
+          conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
+      if (names.isEmpty()) {
+        throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
+            + " need to be a set of valid DNS names or IP addresses."
+            + " Empty address list found.");
+      }
+
+      Collection<InetSocketAddress> addresses = new HashSet<>(names.size());
+      for (String address : names) {
+        Optional<String> hostname = getHostName(address);
+        if (!hostname.isPresent()) {
+          throw new IllegalArgumentException("Invalid hostname for SCM: "
+              + address);
+        }
+        int port = getHostPort(address)
+            .orElse(conf.getInt(OZONE_SCM_DATANODE_PORT_KEY,
+                OZONE_SCM_DATANODE_PORT_DEFAULT));
+        InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
+            port);
+        addresses.add(addr);
+      }
+
+      if (addresses.size() > 1) {
+        LOG.warn("When SCM HA is configured, configure {} appended with " +
+            "serviceId and nodeId. {} is deprecated.", OZONE_SCM_ADDRESS_KEY,
+            OZONE_SCM_NAMES);
+      }
+      return addresses;
     }
-    return addresses;
   }
 
   /**
@@ -279,8 +311,8 @@
   public static InetSocketAddress getSingleSCMAddress(
       ConfigurationSource conf) {
     Collection<InetSocketAddress> singleton = getSCMAddresses(conf);
-    Preconditions.checkArgument(singleton.size() == 1,
-        MULTIPLE_SCM_NOT_YET_SUPPORTED);
+   // Preconditions.checkArgument(singleton.size() == 1,
+    //    MULTIPLE_SCM_NOT_YET_SUPPORTED);
     return singleton.iterator().next();
   }
 
@@ -490,6 +522,14 @@
         "Path should be a descendant of %s", ancestor);
   }
 
+  public static File createDir(String dirPath) {
+    File dirFile = new File(dirPath);
+    if (!dirFile.mkdirs() && !dirFile.exists()) {
+      throw new IllegalArgumentException("Unable to create path: " + dirFile);
+    }
+    return dirFile;
+  }
+
   /**
    * Leverages the Configuration.getPassword method to attempt to get
    * passwords from the CredentialProvider API before falling back to
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/AddSCMRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/AddSCMRequest.java
new file mode 100644
index 0000000..8133c57
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/AddSCMRequest.java
@@ -0,0 +1,113 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+/**
+ * Class for ADD SCM request to be sent by Bootstrapping SCM to existing
+ * leader SCM.
+ */
+public class AddSCMRequest {
+
+  private final String clusterId;
+  private final String scmId;
+  private final String ratisAddr;
+
+  public AddSCMRequest(String clusterId, String scmId, String addr) {
+    this.clusterId = clusterId;
+    this.scmId = scmId;
+    this.ratisAddr = addr;
+  }
+
+  public static AddSCMRequest getFromProtobuf(
+      HddsProtos.AddScmRequestProto proto) {
+    return new Builder().setClusterId(proto.getClusterId())
+        .setScmId(proto.getScmId()).setRatisAddr(proto.getRatisAddr()).build();
+  }
+
+  public HddsProtos.AddScmRequestProto getProtobuf() {
+    return HddsProtos.AddScmRequestProto.newBuilder().setClusterId(clusterId)
+        .setScmId(scmId).setRatisAddr(ratisAddr).build();
+  }
+  /**
+   * Builder for AddSCMRequest.
+   */
+  public static class Builder {
+    private String clusterId;
+    private String scmId;
+    private String ratisAddr;
+
+
+    /**
+     * sets the cluster id.
+     * @param cid clusterId to be set
+     * @return Builder for AddSCMRequest
+     */
+    public AddSCMRequest.Builder setClusterId(String cid) {
+      this.clusterId = cid;
+      return this;
+    }
+
+    /**
+     * sets the scmId.
+     * @param id scmId
+     * @return Builder for AddSCMRequest
+     */
+    public AddSCMRequest.Builder setScmId(String id) {
+      this.scmId = id;
+      return this;
+    }
+
+    /**
+     * Set ratis address in Scm HA.
+     * @param   addr  address in the format of [ip|hostname]:port
+     * @return  Builder for AddSCMRequest
+     */
+    public AddSCMRequest.Builder setRatisAddr(String addr) {
+      this.ratisAddr = addr;
+      return this;
+    }
+
+    public AddSCMRequest build() {
+      return new AddSCMRequest(clusterId, scmId, ratisAddr);
+    }
+  }
+
+  /**
+   * Gets the clusterId from the Version file.
+   * @return ClusterId
+   */
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  /**
+   * Gets the SCM Id from the Version file.
+   * @return SCM Id
+   */
+  public String getScmId() {
+    return scmId;
+  }
+
+  public String getRatisAddr() {
+    return ratisAddr;
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index aecf45c..6dd3856 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -200,6 +200,10 @@
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
       "ozone.scm.https-address";
 
+  public static final String OZONE_SCM_ADDRESS_KEY =
+      "ozone.scm.address";
+  public static final String OZONE_SCM_BIND_HOST_DEFAULT =
+      "0.0.0.0";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -285,8 +289,30 @@
   // able to send back a new list to the datanodes.
   public static final String OZONE_SCM_NAMES = "ozone.scm.names";
 
-  public static final int OZONE_SCM_DEFAULT_PORT =
-      OZONE_SCM_DATANODE_PORT_DEFAULT;
+  public static final String OZONE_SCM_DEFAULT_SERVICE_ID =
+      "ozone.scm.default.service.id";
+
+  public static final String OZONE_SCM_SERVICE_IDS_KEY =
+      "ozone.scm.service.ids";
+  public static final String OZONE_SCM_NODES_KEY =
+      "ozone.scm.nodes";
+  public static final String OZONE_SCM_NODE_ID_KEY =
+      "ozone.scm.node.id";
+
+  /**
+   * Optional config, if being set will cause scm --init to only take effect on
+   * the specific node and ignore scm --bootstrap cmd.
+   * Similarly, scm --init will be ignored on the non-primordial scm nodes.
+   * With the config set, applications/admins can safely execute init and
+   * bootstrap commands safely on all scm instances, for example kubernetes
+   * deployments.
+   *
+   * If a cluster is upgraded from non-ratis to ratis based SCM, scm --init
+   * needs to re-run for switching from
+   * non-ratis based SCM to ratis-based SCM on the primary node.
+   */
+  public static final String OZONE_SCM_PRIMORDIAL_NODE_ID_KEY =
+      "ozone.scm.primordial.node.id";
   // The path where datanode ID is to be written to.
   // if this value is not set then container startup will fail.
   public static final String OZONE_SCM_DATANODE_ID_DIR =
@@ -364,6 +390,10 @@
   public static final String HDDS_SCM_WATCHER_TIMEOUT =
       "hdds.scm.watcher.timeout";
 
+  public static final String OZONE_SCM_SEQUENCE_ID_BATCH_SIZE =
+      "ozone.scm.sequence.id.batch.size";
+  public static final int OZONE_SCM_SEQUENCE_ID_BATCH_SIZE_DEFAULT = 1000;
+
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
       "10m";
 
@@ -376,11 +406,105 @@
   public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled";
   public static final boolean HDDS_TRACING_ENABLED_DEFAULT = false;
 
+  // SCM Ratis related
+  public static final String OZONE_SCM_HA_ENABLE_KEY
+      = "ozone.scm.ratis.enable";
+  public static final boolean OZONE_SCM_HA_ENABLE_DEFAULT
+      = false;
+  public static final String OZONE_SCM_RATIS_PORT_KEY
+      = "ozone.scm.ratis.port";
+  public static final int OZONE_SCM_RATIS_PORT_DEFAULT
+      = 9865;
+  public static final String OZONE_SCM_GRPC_PORT_KEY
+      = "ozone.scm.grpc.port";
+  public static final int OZONE_SCM_GRPC_PORT_DEFAULT
+      = 9866;
+  public static final String OZONE_SCM_RATIS_RPC_TYPE_KEY
+      = "ozone.scm.ratis.rpc.type";
+  public static final String OZONE_SCM_RATIS_RPC_TYPE_DEFAULT
+      = "GRPC";
+
+  // SCM Ratis Log configurations
+  public static final String OZONE_SCM_RATIS_STORAGE_DIR
+      = "ozone.scm.ratis.storage.dir";
+  public static final String OZONE_SCM_RATIS_SEGMENT_SIZE_KEY
+      = "ozone.scm.ratis.segment.size";
+  public static final String OZONE_SCM_RATIS_SEGMENT_SIZE_DEFAULT
+      = "16KB";
+  public static final String OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
+      = "ozone.scm.ratis.segment.preallocated.size";
+  public static final String OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
+      = "16KB";
+
+  // SCM Ratis Log Appender configurations
+  public static final String
+      OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
+      "ozone.scm.ratis.log.appender.queue.num-elements";
+  public static final int
+      OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1024;
+  public static final String OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
+      "ozone.scm.ratis.log.appender.queue.byte-limit";
+  public static final String
+      OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
+  public static final String OZONE_SCM_RATIS_LOG_PURGE_GAP =
+      "ozone.scm.ratis.log.purge.gap";
+  public static final int OZONE_SCM_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000;
+
+  // SCM Ratis server configurations
+  public static final String OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_KEY
+      = "ozone.scm.ratis.server.request.timeout";
+  public static final TimeDuration
+      OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT
+      = TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
+  public static final String
+      OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY
+      = "ozone.scm.ratis.server.retry.cache.timeout";
+  public static final TimeDuration
+      OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT
+      = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
+  public static final String OZONE_SCM_RATIS_MINIMUM_TIMEOUT_KEY
+      = "ozone.scm.ratis.minimum.timeout";
+  public static final TimeDuration OZONE_SCM_RATIS_MINIMUM_TIMEOUT_DEFAULT
+      = TimeDuration.valueOf(1, TimeUnit.SECONDS);
+
+  // SCM Ratis Leader Election configurations
+  public static final String
+      OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
+      "ozone.scm.ratis.leader.election.minimum.timeout.duration";
+  public static final TimeDuration
+      OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
+      TimeDuration.valueOf(1, TimeUnit.SECONDS);
+  public static final String OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY
+      = "ozone.scm.ratis.server.failure.timeout.duration";
+  public static final TimeDuration
+      OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT
+      = TimeDuration.valueOf(120, TimeUnit.SECONDS);
+
+  // SCM Leader server role check interval
+  public static final String OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY
+      = "ozone.scm.ratis.server.role.check.interval";
+  public static final TimeDuration
+      OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT
+      = TimeDuration.valueOf(15, TimeUnit.SECONDS);
+
   public static final String OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL =
       "ozone.scm.datanode.admin.monitor.interval";
   public static final String OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL_DEFAULT =
       "30s";
 
+  public static final String HDDS_DATANODE_UPGRADE_LAYOUT_INLINE =
+      "hdds.datanode.upgrade.layout.inline";
+  public static final boolean HDDS_DATANODE_UPGRADE_LAYOUT_INLINE_DEFAULT =
+      true;
+
+
+  // Temporary config which will be used only for test only purposes until
+  // SCM HA Security work is completed. This config should not be modified by
+  // users.
+  public static final String OZONE_SCM_HA_SECURITY_SUPPORTED =
+      "hdds.scm.ha.security.enable";
+  public static final boolean OZONE_SCM_HA_SECURITY_SUPPORTED_DEFAULT = false;
+
   /**
    * Never constructed.
    */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
index 6236feb..b9d823e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * ScmInfo wraps the result returned from SCM#getScmInfo which
  * contains clusterId and the SCM Id.
@@ -25,6 +28,7 @@
 public final class ScmInfo {
   private String clusterId;
   private String scmId;
+  private List<String> peerRoles;
 
   /**
    * Builder for ScmInfo.
@@ -32,6 +36,11 @@
   public static class Builder {
     private String clusterId;
     private String scmId;
+    private List<String> peerRoles;
+
+    public Builder() {
+      peerRoles = new ArrayList<>();
+    }
 
     /**
      * sets the cluster id.
@@ -53,14 +62,25 @@
       return this;
     }
 
+    /**
+     * Set peer address in Scm HA.
+     * @param roles ratis peer address in the format of [ip|hostname]:port
+     * @return  Builder for scmInfo
+     */
+    public Builder setRatisPeerRoles(List<String> roles) {
+      peerRoles.addAll(roles);
+      return this;
+    }
+
     public ScmInfo build() {
-      return new ScmInfo(clusterId, scmId);
+      return new ScmInfo(clusterId, scmId, peerRoles);
     }
   }
 
-  private ScmInfo(String clusterId, String scmId) {
+  private ScmInfo(String clusterId, String scmId, List<String> peerRoles) {
     this.clusterId = clusterId;
     this.scmId = scmId;
+    this.peerRoles = peerRoles;
   }
 
   /**
@@ -78,4 +98,12 @@
   public String getScmId() {
     return scmId;
   }
+
+  /**
+   * Gets the list of peer roles (currently address) in Scm HA.
+   * @return List of peer address
+   */
+  public List<String> getRatisPeerRoles() {
+    return peerRoles;
+  }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 4979df3..e5c5680 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -306,6 +306,11 @@
   boolean getReplicationManagerStatus() throws IOException;
 
   /**
+   * returns the list of ratis peer roles. Currently only include peer address.
+   */
+  List<String> getScmRatisRoles() throws IOException;
+
+  /**
    * Get usage information of datanode by ipaddress or uuid.
    *
    * @param ipaddress datanode ipaddress String
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index bb44da4..9525050 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -23,6 +23,7 @@
 import org.apache.commons.lang3.builder.CompareToBuilder;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 /**
  * Container ID is an integer that is a value between 1..MAX_CONTAINER ID.
@@ -34,13 +35,14 @@
 
   private final long id;
 
-  // TODO: make this private.
   /**
    * Constructs ContainerID.
    *
    * @param id int
    */
   public ContainerID(long id) {
+    Preconditions.checkState(id >= 0,
+        "Container ID should be positive. %s.", id);
     this.id = id;
   }
 
@@ -49,9 +51,7 @@
    * @param containerID  long
    * @return ContainerID.
    */
-  public static ContainerID valueof(final long containerID) {
-    Preconditions.checkState(containerID > 0,
-        "Container ID should be a positive long. "+ containerID);
+  public static ContainerID valueOf(final long containerID) {
     return new ContainerID(containerID);
   }
 
@@ -60,14 +60,30 @@
    *
    * @return int
    */
+  @Deprecated
+  /*
+   * Don't expose the int value.
+   */
   public long getId() {
     return id;
   }
 
+  /**
+   * Use proto message.
+   */
+  @Deprecated
   public byte[] getBytes() {
     return Longs.toByteArray(id);
   }
 
+  public HddsProtos.ContainerID getProtobuf() {
+    return HddsProtos.ContainerID.newBuilder().setId(id).build();
+  }
+
+  public static ContainerID getFromProtobuf(HddsProtos.ContainerID proto) {
+    return ContainerID.valueOf(proto.getId());
+  }
+
   @Override
   public boolean equals(final Object o) {
     if (this == o) {
@@ -81,14 +97,14 @@
     final ContainerID that = (ContainerID) o;
 
     return new EqualsBuilder()
-        .append(getId(), that.getId())
+        .append(id, that.id)
         .isEquals();
   }
 
   @Override
   public int hashCode() {
     return new HashCodeBuilder(61, 71)
-        .append(getId())
+        .append(id)
         .toHashCode();
   }
 
@@ -96,7 +112,7 @@
   public int compareTo(final ContainerID that) {
     Preconditions.checkNotNull(that);
     return new CompareToBuilder()
-        .append(this.getId(), that.getId())
+        .append(this.id, that.id)
         .build();
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 93d9221..25c3c77 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -125,6 +125,11 @@
 
   }
 
+  /**
+   * This method is depricated, use {@code containerID()} which returns
+   * {@link ContainerID} object.
+   */
+  @Deprecated
   public long getContainerID() {
     return containerID;
   }
@@ -183,7 +188,7 @@
   }
 
   public ContainerID containerID() {
-    return new ContainerID(getContainerID());
+    return ContainerID.valueOf(containerID);
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
index 803aa03..824a1f5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
@@ -91,7 +91,7 @@
       HddsProtos.ExcludeListProto excludeListProto) {
     ExcludeList excludeList = new ExcludeList();
     excludeListProto.getContainerIdsList().forEach(id -> {
-      excludeList.addConatinerId(ContainerID.valueof(id));
+      excludeList.addConatinerId(ContainerID.valueOf(id));
     });
     DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
     excludeListProto.getDatanodesList().forEach(dn -> {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index 48a8e05..82e3034 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -124,6 +124,7 @@
     FAILED_TO_ALLOCATE_ENOUGH_BLOCKS,
     INTERNAL_ERROR,
     FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY,
-    FAILED_TO_INIT_LEADER_CHOOSE_POLICY
+    FAILED_TO_INIT_LEADER_CHOOSE_POLICY,
+    SCM_NOT_LEADER
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java
new file mode 100644
index 0000000..5fe251b
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.net.InetSocketAddress;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.net.NetUtils;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.HA;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.RATIS;
+import static org.apache.hadoop.hdds.conf.ConfigTag.SCM;
+
+/**
+ * Configuration used by SCM HA.
+ */
+@ConfigGroup(prefix = "ozone.scm.ha")
+public class SCMHAConfiguration {
+
+  @Config(key = "ratis.storage.dir",
+      type = ConfigType.STRING,
+      defaultValue = "",
+      tags = {OZONE, SCM, HA, RATIS},
+      description = "Storage directory used by SCM to write Ratis logs."
+  )
+  private String ratisStorageDir;
+
+  @Config(key = "ratis.bind.host",
+      type = ConfigType.STRING,
+      defaultValue = "0.0.0.0",
+      tags = {OZONE, SCM, HA, RATIS},
+      description = "Host used by SCM for binding Ratis Server."
+  )
+  private String ratisBindHost = "0.0.0.0";
+
+  @Config(key = "ratis.bind.port",
+      type = ConfigType.INT,
+      defaultValue = "9865",
+      tags = {OZONE, SCM, HA, RATIS},
+      description = "Port used by SCM for Ratis Server."
+  )
+  private int ratisBindPort = 9865;
+
+
+  @Config(key = "ratis.rpc.type",
+      type = ConfigType.STRING,
+      defaultValue = "GRPC",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "Ratis supports different kinds of transports like" +
+          " netty, GRPC, Hadoop RPC etc. This picks one of those for" +
+          " this cluster."
+  )
+  private String ratisRpcType;
+
+  @Config(key = "ratis.segment.size",
+      type = ConfigType.SIZE,
+      defaultValue = "16KB",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The size of the raft segment used by Apache Ratis on" +
+          " SCM. (16 KB by default)"
+  )
+  private long raftSegmentSize = 16L * 1024L;
+
+  @Config(key = "ratis.segment.preallocated.size",
+      type = ConfigType.SIZE,
+      defaultValue = "16KB",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The size of the buffer which is preallocated for" +
+          " raft segment used by Apache Ratis on SCM.(16 KB by default)"
+  )
+  private long raftSegmentPreAllocatedSize = 16L * 1024L;
+
+  @Config(key = "ratis.log.appender.queue.num-elements",
+      type = ConfigType.INT,
+      defaultValue = "1024",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "Number of operation pending with Raft's Log Worker."
+  )
+  private int raftLogAppenderQueueNum = 1024;
+
+  @Config(key = "ratis.log.appender.queue.byte-limit",
+      type = ConfigType.SIZE,
+      defaultValue = "32MB",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "Byte limit for Raft's Log Worker queue."
+  )
+  private int raftLogAppenderQueueByteLimit = 32 * 1024 * 1024;
+
+  @Config(key = "ratis.log.purge.enabled",
+      type = ConfigType.BOOLEAN,
+      defaultValue = "false",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "whether enable raft log purge."
+  )
+  private boolean raftLogPurgeEnabled = false;
+
+  @Config(key = "ratis.log.purge.gap",
+      type = ConfigType.INT,
+      defaultValue = "1000000",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The minimum gap between log indices for Raft server to" +
+          " purge its log segments after taking snapshot."
+  )
+  private int raftLogPurgeGap = 1000000;
+
+  @Config(key = "ratis.snapshot.threshold",
+      type = ConfigType.LONG,
+      defaultValue = "1000",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The threshold to trigger a Ratis taking snapshot " +
+          "operation for SCM")
+  private long ratisSnapshotThreshold = 1000L;
+
+  @Config(key = "ratis.request.timeout",
+      type = ConfigType.TIME,
+      defaultValue = "3000ms",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The timeout duration for SCM's Ratis server RPC."
+  )
+  private long ratisRequestTimeout = 3000L;
+
+  @Config(key = "ratis.server.retry.cache.timeout",
+      type = ConfigType.TIME,
+      defaultValue = "60s",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "Retry Cache entry timeout for SCM's ratis server."
+  )
+  private long ratisRetryCacheTimeout = 60 * 1000L;
+
+
+  @Config(key = "ratis.leader.election.timeout",
+      type = ConfigType.TIME,
+      defaultValue = "5s",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The minimum timeout duration for SCM ratis leader" +
+          " election. Default is 1s."
+  )
+  private long ratisLeaderElectionTimeout = 5 * 1000L;
+
+  @Config(key = "ratis.leader.ready.wait.timeout",
+      type = ConfigType.TIME,
+      defaultValue = "60s",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The minimum timeout duration for waiting for" +
+                    "leader readiness"
+  )
+  private long ratisLeaderReadyWaitTimeout = 60 * 1000L;
+
+  @Config(key = "ratis.leader.ready.check.interval",
+      type = ConfigType.TIME,
+      defaultValue = "2s",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The interval between ratis server performing" +
+                    "a leader readiness check"
+  )
+  private long ratisLeaderReadyCheckInterval = 2 * 1000L;
+
+  @Config(key = "ratis.server.failure.timeout.duration",
+      type = ConfigType.TIME,
+      defaultValue = "120s",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The timeout duration for ratis server failure" +
+          " detection, once the threshold has reached, the ratis state" +
+          " machine will be informed about the failure in the ratis ring."
+  )
+  private long ratisNodeFailureTimeout = 120 * 1000L;
+
+  @Config(key = "ratis.server.role.check.interval",
+      type = ConfigType.TIME,
+      defaultValue = "15s",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The interval between SCM leader performing a role" +
+          " check on its ratis server. Ratis server informs SCM if it loses" +
+          " the leader role. The scheduled check is an secondary check to" +
+          " ensure that the leader role is updated periodically"
+  )
+  private long ratisRoleCheckerInterval = 15 * 1000L;
+
+  @Config(key = "ratis.snapshot.dir",
+      type = ConfigType.STRING,
+      defaultValue = "",
+      tags = {SCM, OZONE, HA, RATIS},
+      description = "The ratis snapshot dir location"
+  )
+  private String ratisSnapshotDir;
+
+  @Config(key = "grpc.deadline.interval",
+      type = ConfigType.TIME,
+      defaultValue = "30m",
+      tags = {OZONE, SCM, HA, RATIS},
+      description = "Deadline for SCM DB checkpoint interval."
+  )
+  private long grpcDeadlineInterval = 30 * 60 * 1000L;
+
+  public long getGrpcDeadlineInterval() {
+    return grpcDeadlineInterval;
+  }
+
+
+  public String getRatisStorageDir() {
+    return ratisStorageDir;
+  }
+
+  public String getRatisSnapshotDir() {
+    return ratisSnapshotDir;
+  }
+
+  public void setRatisStorageDir(String dir) {
+    this.ratisStorageDir = dir;
+  }
+
+  public void setRatisSnapshotDir(String dir) {
+    this.ratisSnapshotDir = dir;
+  }
+
+  public void setRaftLogPurgeGap(int gap) {
+    this.raftLogPurgeGap = gap;
+  }
+
+  public InetSocketAddress getRatisBindAddress() {
+    return NetUtils.createSocketAddr(ratisBindHost, ratisBindPort);
+  }
+
+  public String getRatisRpcType() {
+    return ratisRpcType;
+  }
+
+  public long getRaftSegmentSize() {
+    return raftSegmentSize;
+  }
+
+  public long getRaftSegmentPreAllocatedSize() {
+    return raftSegmentPreAllocatedSize;
+  }
+
+  public int getRaftLogAppenderQueueNum() {
+    return raftLogAppenderQueueNum;
+  }
+
+  public int getRaftLogAppenderQueueByteLimit() {
+    return raftLogAppenderQueueByteLimit;
+  }
+
+  public boolean getRaftLogPurgeEnabled() {
+    return raftLogPurgeEnabled;
+  }
+
+  public void setRaftLogPurgeEnabled(boolean enabled) {
+    this.raftLogPurgeEnabled = enabled;
+  }
+
+  public int getRaftLogPurgeGap() {
+    return raftLogPurgeGap;
+  }
+
+  public long getRatisSnapshotThreshold() {
+    return ratisSnapshotThreshold;
+  }
+
+  public void setRatisSnapshotThreshold(long threshold) {
+    this.ratisSnapshotThreshold = threshold;
+  }
+
+  public long getRatisRetryCacheTimeout() {
+    return ratisRetryCacheTimeout;
+  }
+
+  public long getRatisRequestTimeout() {
+    Preconditions.checkArgument(ratisRequestTimeout > 1000L,
+        "Ratis request timeout cannot be less than 1000ms.");
+    return ratisRequestTimeout;
+  }
+
+  public long getLeaderElectionMinTimeout() {
+    return ratisLeaderElectionTimeout;
+  }
+
+  public long getLeaderElectionMaxTimeout() {
+    return ratisLeaderElectionTimeout + 200L;
+  }
+
+  public long getLeaderReadyWaitTimeout() {
+    return ratisLeaderReadyWaitTimeout;
+  }
+
+  public void setLeaderReadyWaitTimeout(long mills) {
+    ratisLeaderReadyWaitTimeout = mills;
+  }
+
+  public long getLeaderReadyCheckInterval() {
+    return ratisLeaderReadyCheckInterval;
+  }
+
+  public long getRatisNodeFailureTimeout() {
+    return ratisNodeFailureTimeout;
+  }
+
+  public long getRatisRoleCheckerInterval() {
+    return ratisRoleCheckerInterval;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
new file mode 100644
index 0000000..b3fc40e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+
+import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.ozone.ha.ConfUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_RATIS_SNAPSHOT_DIR;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY;
+
+/**
+ * Utility class used by SCM HA.
+ */
+public final class SCMHAUtils {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SCMHAUtils.class);
+  private SCMHAUtils() {
+    // not used
+  }
+
+  // Check if SCM HA is enabled.
+  public static boolean isSCMHAEnabled(ConfigurationSource conf) {
+    return conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY,
+        ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
+  }
+
+  public static String getPrimordialSCM(ConfigurationSource conf) {
+    return conf.get(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY);
+  }
+
+  public static boolean isPrimordialSCM(ConfigurationSource conf,
+      String selfNodeId) {
+    String primordialNode = getPrimordialSCM(conf);
+    return isSCMHAEnabled(conf) && primordialNode != null && primordialNode
+        .equals(selfNodeId);
+  }
+  /**
+   * Get a collection of all scmNodeIds for the given scmServiceId.
+   */
+  public static Collection<String> getSCMNodeIds(ConfigurationSource conf,
+                                                 String scmServiceId) {
+    String key = addSuffix(ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId);
+    return conf.getTrimmedStringCollection(key);
+  }
+
+  public static String  getLocalSCMNodeId(String scmServiceId) {
+    return addSuffix(ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId);
+  }
+
+  /**
+   * Add non empty and non null suffix to a key.
+   */
+  private static String addSuffix(String key, String suffix) {
+    if (suffix == null || suffix.isEmpty()) {
+      return key;
+    }
+    assert !suffix.startsWith(".") :
+        "suffix '" + suffix + "' should not already have '.' prepended.";
+    return key + "." + suffix;
+  }
+
+  /**
+   * Get the local directory where ratis logs will be stored.
+   */
+  public static String getSCMRatisDirectory(ConfigurationSource conf) {
+    String scmRatisDirectory =
+        conf.getObject(SCMHAConfiguration.class).getRatisStorageDir();
+
+    if (Strings.isNullOrEmpty(scmRatisDirectory)) {
+      scmRatisDirectory = ServerUtils.getDefaultRatisDirectory(conf);
+    }
+    return scmRatisDirectory;
+  }
+
+  public static String getSCMRatisSnapshotDirectory(ConfigurationSource conf) {
+    String snapshotDir =
+        conf.getObject(SCMHAConfiguration.class).getRatisStorageDir();
+
+    // If ratis snapshot directory is not set, fall back to ozone.metadata.dir.
+    if (Strings.isNullOrEmpty(snapshotDir)) {
+      LOG.warn("SCM snapshot dir is not configured. Falling back to {} config",
+          OZONE_METADATA_DIRS);
+      File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
+      snapshotDir =
+          Paths.get(metaDirPath.getPath(), SCM_RATIS_SNAPSHOT_DIR).toString();
+    }
+    return snapshotDir;
+  }
+
+  /**
+   * Get SCM ServiceId from OzoneConfiguration.
+   * @param conf
+   * @return SCM service id if defined, else null.
+   */
+  public static String getScmServiceId(ConfigurationSource conf) {
+
+    String localScmServiceId = conf.getTrimmed(
+        ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID);
+
+    Collection<String> scmServiceIds;
+
+    if (localScmServiceId == null) {
+      // There is no default scm service id is being set, fall back to ozone
+      // .scm.service.ids.
+      scmServiceIds = conf.getTrimmedStringCollection(
+          OZONE_SCM_SERVICE_IDS_KEY);
+      if (scmServiceIds.size() > 1) {
+        throw new ConfigurationException("When multiple SCM Service Ids are " +
+            "configured," + OZONE_SCM_DEFAULT_SERVICE_ID + " need to be " +
+            "defined");
+      } else if (scmServiceIds.size() == 1) {
+        localScmServiceId = scmServiceIds.iterator().next();
+      }
+    }
+    return localScmServiceId;
+  }
+
+  /**
+   * Removes the self node from the list of nodes in the
+   * configuration.
+   * @param configuration OzoneConfiguration
+   * @param selfId - Local node Id of SCM.
+   * @return Updated OzoneConfiguration
+   */
+
+  public static OzoneConfiguration removeSelfId(
+      OzoneConfiguration configuration, String selfId) {
+    final OzoneConfiguration conf = new OzoneConfiguration(configuration);
+    String scmNodes = conf.get(ConfUtils
+        .addKeySuffixes(ScmConfigKeys.OZONE_SCM_NODES_KEY,
+            getScmServiceId(conf)));
+    if (scmNodes != null) {
+      String[] parts = scmNodes.split(",");
+      List<String> partsLeft = new ArrayList<>();
+      for (String part : parts) {
+        if (!part.equals(selfId)) {
+          partsLeft.add(part);
+        }
+      }
+      conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY, String.join(",", partsLeft));
+    }
+    return conf;
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java
new file mode 100644
index 0000000..f3191ea
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.ha.ConfUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.OptionalInt;
+
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NODES_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_NODEID;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_SERVICE_ID;
+
+/**
+ * Class which builds SCM Node Information.
+ *
+ * This class is used by SCM clients like OzoneManager, Client, Admin
+ * commands to figure out SCM Node Information to make contact to SCM.
+ */
+public class SCMNodeInfo {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SCMNodeInfo.class);
+  private String serviceId;
+  private String nodeId;
+  private String blockClientAddress;
+  private String scmClientAddress;
+  private String scmSecurityAddress;
+  private String scmDatanodeAddress;
+
+  /**
+   * Build SCM Node information from configuration.
+   * @param conf
+   * @return
+   */
+  public static List<SCMNodeInfo> buildNodeInfo(ConfigurationSource conf) {
+
+    // First figure out scm client address from HA style config.
+    // If service Id is not defined, fall back to non-HA config.
+
+    List<SCMNodeInfo> scmNodeInfoList = new ArrayList<>();
+    String scmServiceId = SCMHAUtils.getScmServiceId(conf);
+    if (scmServiceId != null) {
+      ArrayList< String > scmNodeIds = new ArrayList<>(
+          SCMHAUtils.getSCMNodeIds(conf, scmServiceId));
+      if (scmNodeIds.size() == 0) {
+        throw new ConfigurationException(
+            String.format("Configuration does not have any value set for %s " +
+                    "for the SCM serviceId %s. List of SCM Node ID's should " +
+                    "be specified for an SCM HA service", OZONE_SCM_NODES_KEY,
+                scmServiceId));
+      }
+
+      for (String scmNodeId : scmNodeIds) {
+        String addressKey = ConfUtils.addKeySuffixes(
+            OZONE_SCM_ADDRESS_KEY, scmServiceId, scmNodeId);
+        String scmAddress = conf.get(addressKey);
+        if (scmAddress == null) {
+          throw new ConfigurationException(addressKey + "is not defined");
+        }
+
+        // Get port from Address Key if defined, else fall back to port key.
+        int scmClientPort = getPort(conf, scmServiceId, scmNodeId,
+            OZONE_SCM_CLIENT_ADDRESS_KEY, OZONE_SCM_CLIENT_PORT_KEY,
+            OZONE_SCM_CLIENT_PORT_DEFAULT);
+
+        int scmBlockClientPort = getPort(conf, scmServiceId, scmNodeId,
+            OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+            OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+            OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+
+        int scmSecurityPort = getPort(conf, scmServiceId, scmNodeId,
+            OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
+            OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+            OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT);
+
+        int scmDatanodePort = getPort(conf, scmServiceId, scmNodeId,
+            OZONE_SCM_DATANODE_ADDRESS_KEY, OZONE_SCM_DATANODE_PORT_KEY,
+            OZONE_SCM_DATANODE_PORT_DEFAULT);
+
+        scmNodeInfoList.add(new SCMNodeInfo(scmServiceId, scmNodeId,
+            buildAddress(scmAddress, scmBlockClientPort),
+            buildAddress(scmAddress, scmClientPort),
+            buildAddress(scmAddress, scmSecurityPort),
+            buildAddress(scmAddress, scmDatanodePort)));
+      }
+      return scmNodeInfoList;
+    } else {
+      scmServiceId = SCM_DUMMY_SERVICE_ID;
+
+      // Following current approach of fall back to
+      // OZONE_SCM_CLIENT_ADDRESS_KEY to figure out hostname.
+
+      String scmBlockClientAddress = getHostNameFromConfigKeys(conf,
+          OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+          OZONE_SCM_CLIENT_ADDRESS_KEY).orElse(null);
+
+      String scmClientAddress = getHostNameFromConfigKeys(conf,
+          OZONE_SCM_CLIENT_ADDRESS_KEY).orElse(null);
+
+      String scmSecurityClientAddress =
+          getHostNameFromConfigKeys(conf,
+              OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
+              OZONE_SCM_CLIENT_ADDRESS_KEY).orElse(null);
+
+      String scmDatanodeAddress =
+          getHostNameFromConfigKeys(conf,
+              OZONE_SCM_DATANODE_ADDRESS_KEY,
+              OZONE_SCM_CLIENT_ADDRESS_KEY).orElse(null);
+
+      int scmBlockClientPort = getPortNumberFromConfigKeys(conf,
+          OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY)
+          .orElse(conf.getInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+              OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
+
+      int scmClientPort = getPortNumberFromConfigKeys(conf,
+          OZONE_SCM_CLIENT_ADDRESS_KEY)
+          .orElse(conf.getInt(OZONE_SCM_CLIENT_PORT_KEY,
+              OZONE_SCM_CLIENT_PORT_DEFAULT));
+
+      int scmSecurityPort = getPortNumberFromConfigKeys(conf,
+          OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY)
+          .orElse(conf.getInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+              OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT));
+
+      int scmDatanodePort = getPortNumberFromConfigKeys(conf,
+          OZONE_SCM_DATANODE_ADDRESS_KEY)
+          .orElse(conf.getInt(OZONE_SCM_DATANODE_PORT_KEY,
+              OZONE_SCM_DATANODE_PORT_DEFAULT));
+
+      scmNodeInfoList.add(new SCMNodeInfo(scmServiceId,
+          SCM_DUMMY_NODEID,
+          scmBlockClientAddress == null ? null :
+              buildAddress(scmBlockClientAddress, scmBlockClientPort),
+          scmClientAddress == null ? null :
+              buildAddress(scmClientAddress, scmClientPort),
+          scmSecurityClientAddress == null ? null :
+              buildAddress(scmSecurityClientAddress, scmSecurityPort),
+          scmDatanodeAddress == null ? null :
+              buildAddress(scmDatanodeAddress, scmDatanodePort)));
+
+      return scmNodeInfoList;
+
+    }
+
+  }
+
+  private static String buildAddress(String address, int port) {
+    return new StringBuilder().append(address).append(":")
+        .append(port).toString();
+  }
+
+  private static int getPort(ConfigurationSource conf,
+      String scmServiceId, String scmNodeId, String configKey,
+      String portKey, int defaultPort) {
+    String suffixKey = ConfUtils.addKeySuffixes(configKey, scmServiceId,
+        scmNodeId);
+    OptionalInt port = getPortNumberFromConfigKeys(conf, suffixKey);
+
+    if (port.isPresent()) {
+      LOG.info("ConfigKey {} is deprecated, For configuring different " +
+          "ports for each SCM use PortConfigKey {} appended with serviceId " +
+          "and nodeId", configKey, portKey);
+      return port.getAsInt();
+    } else {
+      return conf.getInt(ConfUtils.addKeySuffixes(portKey, scmServiceId,
+          scmNodeId), defaultPort);
+    }
+  }
+
+  /**
+   * SCM Node Info which contains information about scm service address.
+   * @param serviceId
+   * @param nodeId
+   * @param blockClientAddress
+   * @param scmClientAddress
+   * @param scmSecurityAddress
+   * @param scmDatanodeAddress
+   */
+  public SCMNodeInfo(String serviceId, String nodeId,
+      String blockClientAddress, String scmClientAddress,
+      String scmSecurityAddress, String scmDatanodeAddress) {
+    this.serviceId = serviceId;
+    this.nodeId = nodeId;
+    this.blockClientAddress = blockClientAddress;
+    this.scmClientAddress = scmClientAddress;
+    this.scmSecurityAddress = scmSecurityAddress;
+    this.scmDatanodeAddress = scmDatanodeAddress;
+  }
+
+  public String getServiceId() {
+    return serviceId;
+  }
+
+  public String getNodeId() {
+    return nodeId;
+  }
+
+  public String getBlockClientAddress() {
+    return blockClientAddress;
+  }
+
+  public String getScmClientAddress() {
+    return scmClientAddress;
+  }
+
+  public String getScmSecurityAddress() {
+    return scmSecurityAddress;
+  }
+
+  public String getScmDatanodeAddress() {
+    return scmDatanodeAddress;
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
similarity index 87%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
index 4944017..19153b0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
@@ -15,8 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
-
+package org.apache.hadoop.hdds.scm.ha;
 /**
- * This package contains classes related to Apache Ratis for SCM.
- */
+ Utility slasses for SCM HA.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index dc0087f..09391ce 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.protocol;
 
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmConfig;
@@ -29,8 +30,11 @@
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
@@ -47,6 +51,14 @@
   long versionID = 1L;
 
   /**
+   * Admin command should take effect on all SCM instance.
+   */
+  Set<Type> ADMIN_COMMAND_TYPE = Collections.unmodifiableSet(EnumSet.of(
+      Type.StartReplicationManager,
+      Type.StopReplicationManager,
+      Type.ForceExitSafeMode));
+
+  /**
    * Asks SCM where a container should be allocated. SCM responds with the
    * set of datanodes that should be used creating this container.
    *
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java
index 2b95df3..d75acc4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java
@@ -37,6 +37,16 @@
 
   /**
    * Ctor.
+   * @param message - Error Message
+   * @param errorCode - Error code
+   */
+  public SCMSecurityException(String message, ErrorCode errorCode) {
+    super(message);
+    this.errorCode = errorCode;
+  }
+
+  /**
+   * Ctor.
    * @param message - Message.
    * @param cause  - Actual cause.
    */
@@ -47,11 +57,23 @@
 
   /**
    * Ctor.
-   * @param message - Message.
+   * @param message - Error Message
+   * @param cause - Actual cause.
+   * @param errorCode - Error code.
+   */
+  public SCMSecurityException(String message, Throwable cause,
+      ErrorCode errorCode) {
+    super(message, cause);
+    this.errorCode = errorCode;
+  }
+
+  /**
+   * Ctor.
+   * @param cause - Actual cause.
    * @param error   - error code.
    */
-  public SCMSecurityException(String message, ErrorCode error) {
-    super(message);
+  public SCMSecurityException(Exception cause, ErrorCode error) {
+    super(cause);
     this.errorCode = error;
   }
 
@@ -72,8 +94,20 @@
    * Error codes to make it easy to decode these exceptions.
    */
   public enum ErrorCode {
+    OK,
+    INVALID_CSR,
+    UNABLE_TO_ISSUE_CERTIFICATE,
+    GET_DN_CERTIFICATE_FAILED,
+    GET_OM_CERTIFICATE_FAILED,
+    GET_SCM_CERTIFICATE_FAILED,
+    GET_CERTIFICATE_FAILED,
+    GET_CA_CERT_FAILED,
+    CERTIFICATE_NOT_FOUND,
+    PEM_ENCODE_FAILED,
+    INTERNAL_ERROR,
     DEFAULT,
     MISSING_BLOCK_TOKEN,
-    BLOCK_TOKEN_VERIFICATION_FAILED
+    BLOCK_TOKEN_VERIFICATION_FAILED,
+    GET_ROOT_CA_CERT_FAILED
   }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
index 1abdcc3..03e4c53 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
@@ -50,6 +50,7 @@
 import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE;
 import static java.nio.file.attribute.PosixFilePermission.OWNER_READ;
 import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE;
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.PEM_ENCODE_FAILED;
 
 /**
  * A class used to read and write X.509 certificates  PEM encoded Streams.
@@ -125,7 +126,7 @@
       LOG.error("Error in encoding certificate." + certificate
           .getSubjectDN().toString(), e);
       throw new SCMSecurityException("PEM Encoding failed for certificate." +
-          certificate.getSubjectDN().toString(), e);
+          certificate.getSubjectDN().toString(), e, PEM_ENCODE_FAILED);
     }
   }
 
@@ -292,7 +293,8 @@
    * @throws CertificateEncodingException - on Error.
    * @throws IOException                  - on Error.
    */
-  public X509CertificateHolder getCertificateHolder(X509Certificate x509cert)
+  public static X509CertificateHolder getCertificateHolder(
+      X509Certificate x509cert)
       throws CertificateEncodingException, IOException {
     return new X509CertificateHolder(x509cert.getEncoded());
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
similarity index 100%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java
similarity index 87%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java
index 4944017..80d32bf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/package-info.java
@@ -15,8 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
+
+package org.apache.hadoop.hdds.server;
 
 /**
- * This package contains classes related to Apache Ratis for SCM.
- */
+ * Server Util classes.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 0651707..b12a022 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -25,6 +25,7 @@
 
 import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Paths;
 import java.util.regex.Pattern;
 
 import static org.apache.ratis.thirdparty.io.grpc.Metadata.ASCII_STRING_MARSHALLER;
@@ -38,6 +39,8 @@
 
   public static final String STORAGE_DIR = "scm";
   public static final String SCM_ID = "scmUuid";
+  public static final String CLUSTER_ID_PREFIX = "CID-";
+  public static final String SCM_CERT_SERIAL_ID = "scmCertSerialId";
 
   public static final String OZONE_SIMPLE_ROOT_USER = "root";
   public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
@@ -123,7 +126,9 @@
   public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
   public static final String OM_DB_NAME = "om.db";
+  public static final String SCM_DB_NAME = "scm.db";
   public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
+  public static final String SCM_DB_BACKUP_PREFIX = "scm.db.backup.";
 
   public static final String STORAGE_DIR_CHUNKS = "chunks";
   public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH =
@@ -387,15 +392,40 @@
   public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB";
   public static final String CONTAINER_DB_TYPE_LEVELDB = "LevelDB";
 
+  // SCM HA
+  public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault";
+
+  // SCM Ratis snapshot file to store the last applied index
+  public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex";
+
+  public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm";
   // An on-disk transient marker file used when replacing DB with checkpoint
   public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker";
 
 
+  // TODO : rename this to OZONE_RATIS_SNAPSHOT_DIR and use it in both
+  // SCM and OM
   public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot";
+  public static final String SCM_RATIS_SNAPSHOT_DIR = "snapshot";
 
-  public static final long DEFAULT_OM_UPDATE_ID = -1L;  
+  public static final long DEFAULT_OM_UPDATE_ID = -1L;
+
+
+  // SCM default service Id and node Id in non-HA where config is not defined
+  // in non-HA style.
+  public static final String SCM_DUMMY_NODEID = "scmNodeId";
+  public static final String SCM_DUMMY_SERVICE_ID = "scmServiceId";
 
   // CRL Sequence Id
   public static final String CRL_SEQUENCE_ID_KEY = "CRL_SEQUENCE_ID";
 
+  public static final String SCM_CA_PATH = "ca";
+  public static final String SCM_CA_CERT_STORAGE_DIR = "scm";
+  public static final String SCM_SUB_CA_PATH = "sub-ca";
+
+  public static final String SCM_ROOT_CA_COMPONENT_NAME =
+      Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString();
+
+  public static final String SCM_SUB_CA_PREFIX = "scm-sub@";
+  public static final String SCM_ROOT_CA_PREFIX = "scm@";
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
index 102d47a..a4ae55e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java
@@ -44,7 +44,8 @@
   START_REPLICATION_MANAGER,
   STOP_REPLICATION_MANAGER,
   GET_REPLICATION_MANAGER_STATUS,
-  GET_CONTAINER_WITH_PIPELINE_BATCH;
+  GET_CONTAINER_WITH_PIPELINE_BATCH,
+  ADD_SCM;
 
   @Override
   public String getAction() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index 55911fc..492931d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.ozone.OzoneConsts;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -198,7 +199,7 @@
    * @return new clusterID
    */
   public static String newClusterID() {
-    return "CID-" + UUID.randomUUID().toString();
+    return OzoneConsts.CLUSTER_ID_PREFIX + UUID.randomUUID().toString();
   }
 
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
similarity index 83%
rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java
rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
index 91c388e..a9de892 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
@@ -15,7 +15,7 @@
  * the License.
  */
 
-package org.apache.hadoop.ozone.om.ratis;
+package org.apache.hadoop.ozone.common.ha.ratis;
 
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.ratis.server.storage.FileInfo;
@@ -29,14 +29,14 @@
 
 /**
  * This class captures the snapshotIndex and term of the latest snapshot in
- * the OM.
+ * the server.
  * Ratis server loads the snapshotInfo during startup and updates the
- * lastApplied index to this snapshotIndex. OM SnapshotInfo does not contain
+ * lastApplied index to this snapshotIndex. SnapshotInfo does not contain
  * any files. It is used only to store/ update the last applied index and term.
  */
-public class OMRatisSnapshotInfo implements SnapshotInfo {
+public class RatisSnapshotInfo implements SnapshotInfo {
 
-  static final Logger LOG = LoggerFactory.getLogger(OMRatisSnapshotInfo.class);
+  static final Logger LOG = LoggerFactory.getLogger(RatisSnapshotInfo.class);
 
   private volatile long term = 0;
   private volatile long snapshotIndex = -1;
@@ -50,6 +50,13 @@
     this.snapshotIndex = newIndex;
   }
 
+  public RatisSnapshotInfo() {}
+
+  public RatisSnapshotInfo(long term, long index) {
+    this.term = term;
+    this.snapshotIndex = index;
+  }
+
   @Override
   public TermIndex getTermIndex() {
     return TermIndex.valueOf(term, snapshotIndex);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java
new file mode 100644
index 0000000..f01aef4
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.common.ha.ratis;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/ConfUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/ConfUtils.java
new file mode 100644
index 0000000..10bc9a0
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/ConfUtils.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.ha;
+
+import com.google.common.base.Joiner;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.net.NetUtils;
+import org.slf4j.Logger;
+
+import java.net.InetSocketAddress;
+
+public final class ConfUtils {
+
+  private ConfUtils() {
+
+  }
+
+  /**
+   * Add non empty and non null suffix to a key.
+   */
+  public static String addSuffix(String key, String suffix) {
+    if (suffix == null || suffix.isEmpty()) {
+      return key;
+    }
+    assert !suffix.startsWith(".") :
+        "suffix '" + suffix + "' should not already have '.' prepended.";
+    return key + "." + suffix;
+  }
+
+  /**
+   * Return configuration key of format key.suffix1.suffix2...suffixN.
+   */
+  public static String addKeySuffixes(String key, String... suffixes) {
+    String keySuffix = concatSuffixes(suffixes);
+    return addSuffix(key, keySuffix);
+  }
+
+  /**
+   * Concatenate list of suffix strings '.' separated.
+   */
+  public static String concatSuffixes(String... suffixes) {
+    if (suffixes == null) {
+      return null;
+    }
+    return Joiner.on(".").skipNulls().join(suffixes);
+  }
+
+  /**
+   * Match input address to local address.
+   * Return true if it matches, false otherwsie.
+   */
+  public static boolean isAddressLocal(InetSocketAddress addr) {
+    return NetUtils.isLocalAddress(addr.getAddress());
+  }
+
+  /**
+   * Get the conf key value appended with serviceId and nodeId.
+   * @param conf
+   * @param confKey
+   * @param omServiceID
+   * @param omNodeId
+   * @return conf value.
+   */
+  public static String getConfSuffixedWithServiceId(ConfigurationSource conf,
+      String confKey, String omServiceID, String omNodeId) {
+    String suffixedConfKey = ConfUtils.addKeySuffixes(
+        confKey, omServiceID, omNodeId);
+    String confValue = conf.getTrimmed(suffixedConfKey);
+    if (StringUtils.isNotEmpty(confValue)) {
+      return confValue;
+    }
+    return null;
+  }
+
+  /**
+   * Set Node Specific config keys to generic config keys.
+   * @param nodeSpecificConfigKeys
+   * @param ozoneConfiguration
+   * @param serviceId
+   * @param nodeId
+   */
+  public static void setNodeSpecificConfigs(
+      String[] nodeSpecificConfigKeys, OzoneConfiguration ozoneConfiguration,
+      String serviceId, String nodeId, Logger logger) {
+    for (String confKey : nodeSpecificConfigKeys) {
+      String confValue = getConfSuffixedWithServiceId(
+          ozoneConfiguration, confKey, serviceId, nodeId);
+      if (confValue != null) {
+        logger.info("Setting configuration key {} with value of key {}: {}",
+            confKey, ConfUtils.addKeySuffixes(confKey, serviceId, nodeId),
+            confValue);
+        ozoneConfiguration.set(confKey, confValue);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
new file mode 100644
index 0000000..5ed5162
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+/**
+ * Classes related to Ozone HA.
+ */
+package org.apache.hadoop.ozone.ha;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index ae81c84..b67b53d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -145,6 +145,15 @@
     </description>
   </property>
   <property>
+    <name>hdds.datanode.upgrade.layout.inline</name>
+    <value>true</value>
+    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
+    <description>Determines whether to upgrade the DN layout on restart
+      automatically. If set of false, the tool verifies that the current
+      disk format is correct.
+    </description>
+  </property>
+  <property>
     <name>hdds.datanode.dir.du.reserved</name>
     <value/>
     <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
@@ -684,6 +693,15 @@
     </description>
   </property>
   <property>
+    <name>ozone.scm.sequence.id.batch.size</name>
+    <value>1000</value>
+    <tag>OZONE, SCM</tag>
+    <description>
+      SCM allocates sequence id in a batch way. This property determines how many
+      ids will be allocated in a single batch.
+    </description>
+  </property>
+  <property>
     <name>ozone.scm.chunk.size</name>
     <value>4MB</value>
     <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
@@ -1873,6 +1891,211 @@
     <tag>OZONE, HDDS, SECURITY</tag>
     <description>SCM security server port.</description>
   </property>
+  <property>
+    <name>ozone.scm.service.ids</name>
+    <value></value>
+    <tag>OZONE, SCM, HA</tag>
+    <description>
+      Comma-separated list of SCM service Ids. This property allows the client
+      to figure out quorum of OzoneManager address.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.default.service.id</name>
+    <value></value>
+    <tag>OZONE, SCM, HA</tag>
+    <description>
+      Service ID of the SCM. If this is not set fall back to
+      ozone.scm.service.ids to find the service ID it belongs to.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.nodes.EXAMPLESCMSERVICEID</name>
+    <value></value>
+    <tag>OZONE, SCM, HA</tag>
+    <description>
+      Comma-separated list of SCM node Ids for a given SCM service ID (eg.
+      EXAMPLESCMSERVICEID). The SCM service ID should be the value (one of the
+      values if there are multiple) set for the parameter ozone.scm.service.ids.
+
+      Unique identifiers for each SCM Node, delimited by commas. This will be
+      used by SCMs in HA setup to determine all the SCMs
+      belonging to the same SCM in the cluster. For example, if you
+      used “scmService1” as the SCM service ID previously, and you wanted to
+      use “scm1”, “scm2” and "scm3" as the individual IDs of the SCMs,
+      you would configure a property ozone.scm.nodes.scmService1, and its value
+      "scm1,scm2,scm3".
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.node.id</name>
+    <value></value>
+    <tag>OZONE, SCM, HA</tag>
+    <description>
+      The ID of this SCM node. If the SCM node ID is not configured it
+      is determined automatically by matching the local node's address
+      with the configured address.
+
+      If node ID is not deterministic from the configuration, then it is set
+      to the scmId from the SCM version file.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.primordial.node.id</name>
+    <value></value>
+    <tag>OZONE, SCM, HA</tag>
+    <description>
+      optional config, if being set will cause scm --init to only take effect on
+      the specific node and ignore scm --bootstrap cmd.
+      Similarly, scm --init will be ignored on the non-primordial scm nodes.
+      With the config set, applications/admins can safely execute init and
+      bootstrap commands safely on all scm instances.
+
+      If a cluster is upgraded from non-ratis to ratis based SCM, scm --init
+      needs to re-run for switching from
+      non-ratis based SCM to ratis-based SCM on the primary node.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.ratis.enable</name>
+    <value>false</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>Property to enable or disable Ratis server on SCM.
+      Please note - this is a temporary property to disable SCM Ratis server.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.port</name>
+    <value>9865</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>
+      The port number of the SCM's Ratis server.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.grpc.port</name>
+    <value>9866</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>
+      The port number of the SCM's grpc server.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.rpc.type</name>
+    <value>GRPC</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>Ratis supports different kinds of transports like netty, GRPC,
+      Hadoop RPC etc. This picks one of those for this cluster.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.storage.dir</name>
+    <value/>
+    <tag>OZONE, SCM, HA, RATIS, STORAGE</tag>
+    <description>This directory is used for storing SCM's Ratis metadata like
+      logs. If this is not set then default metadata dirs is used. A warning
+      will be logged if this not set. Ideally, this should be mapped to a
+      fast disk like an SSD.
+      If undefined, SCM ratis storage dir will fallback to ozone.metadata.dirs.
+      This fallback approach is not recommended for production environments.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.segment.size</name>
+    <value>16KB</value>
+    <tag>OZONE, SCM, HA, RATIS, PERFORMANCE</tag>
+    <description>The size of the raft segment used by Apache Ratis on SCM.
+      (16 KB by default)
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.segment.preallocated.size</name>
+    <value>16KB</value>
+    <tag>OZONE, SCM, HA, RATIS, PERFORMANCE</tag>
+    <description>The size of the buffer which is preallocated for raft segment
+      used by Apache Ratis on SCM.(16 KB by default)
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.log.appender.queue.num-elements</name>
+    <value>1024</value>
+    <tag>OZONE, DEBUG, SCM, HA, RATIS</tag>
+    <description>Number of operation pending with Raft's Log Worker.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.ratis.log.appender.queue.byte-limit</name>
+    <value>32MB</value>
+    <tag>OZONE, DEBUG, SCM, HA, RATIS</tag>
+    <description>Byte limit for Raft's Log Worker queue.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.ratis.log.purge.gap</name>
+    <value>1000000</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>The minimum gap between log indices for Raft server to purge
+      its log segments after taking snapshot.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.ratis.server.request.timeout</name>
+    <value>3s</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>The timeout duration for SCM's ratis server request.</description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.server.retry.cache.timeout</name>
+    <value>600000ms</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>Retry Cache entry timeout for SCM's ratis server.</description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.minimum.timeout</name>
+    <value>1s</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>The minimum timeout duration for SCM's Ratis server rpc.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.leader.election.minimum.timeout.duration</name>
+    <value>1s</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>The minimum timeout duration for SCM ratis leader election.
+      Default is 1s.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.server.failure.timeout.duration</name>
+    <value>120s</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>The timeout duration for ratis server failure detection,
+      once the threshold has reached, the ratis state machine will be informed
+      about the failure in the ratis ring.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.scm.ratis.server.role.check.interval</name>
+    <value>15s</value>
+    <tag>OZONE, SCM, HA, RATIS</tag>
+    <description>The interval between SCM leader performing a role
+      check on its ratis server. Ratis server informs SCM if it
+      loses the leader role. The scheduled check is an secondary
+      check to ensure that the leader role is updated periodically
+      .</description>
+  </property>
 
   <property>
     <name>hdds.metadata.dir</name>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index 7530bd0..b97c98c 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -19,17 +19,23 @@
 
 import java.net.InetSocketAddress;
 import java.nio.file.Paths;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 
 import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
 import static org.hamcrest.core.Is.is;
 import org.junit.Assert;
 import static org.junit.Assert.assertThat;
@@ -86,7 +92,7 @@
     assertThat(addresses.size(), is(1));
     addr = addresses.iterator().next();
     assertThat(addr.getHostName(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT));
+    assertThat(addr.getPort(), is(OZONE_SCM_DATANODE_PORT_DEFAULT));
 
     // Verify valid hostname setup
     conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1");
@@ -94,7 +100,7 @@
     assertThat(addresses.size(), is(1));
     addr = addresses.iterator().next();
     assertThat(addr.getHostName(), is("scm1"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT));
+    assertThat(addr.getPort(), is(OZONE_SCM_DATANODE_PORT_DEFAULT));
 
     // Verify valid hostname and port
     conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234");
@@ -174,4 +180,40 @@
     }
   }
 
+
+  @Test
+  public void testGetSCMAddressesWithHAConfig() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String scmServiceId = "scmserviceId";
+    String[] nodes = new String[]{"scm1", "scm2", "scm3"};
+    conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
+    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId,
+        "scm1,scm2,scm3");
+
+    int port = 9880;
+    List<String> expected = new ArrayList<>();
+    for (String nodeId : nodes) {
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
+          scmServiceId, nodeId), "scm");
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+          scmServiceId, nodeId), ++port);
+      expected.add("scm" + ":" + port);
+    }
+
+    Collection<InetSocketAddress> scmAddressList =
+        HddsUtils.getSCMAddresses(conf);
+
+    Assert.assertNotNull(scmAddressList);
+    Assert.assertEquals(3, scmAddressList.size());
+
+    Iterator<InetSocketAddress> it = scmAddressList.iterator();
+    while (it.hasNext()) {
+      InetSocketAddress next = it.next();
+      expected.remove(next.getHostName()  + ":" + next.getPort());
+    }
+
+    Assert.assertTrue(expected.size() == 0);
+
+  }
+
 }
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
new file mode 100644
index 0000000..a88e1ca
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.ha.ConfUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
+
+public class TestSCMNodeInfo {
+
+  private OzoneConfiguration conf = new OzoneConfiguration();
+  private String scmServiceId = "scmserviceId";
+  private String[] nodes = new String[]{"scm1", "scm2", "scm3"};
+
+  @Before
+  public void setup() {
+    conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
+    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId,
+        "scm1,scm2,scm3");
+  }
+
+  @Test
+  public void testScmHANodeInfo() {
+    int port = 9880;
+    for (String nodeId : nodes) {
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost");
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:" + ++port);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+          scmServiceId, nodeId), port);
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:" + ++port);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+          scmServiceId, nodeId), port);
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:" + ++port);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY,
+          scmServiceId, nodeId), port);
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:" + ++port);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+          scmServiceId, nodeId), port);
+
+    }
+
+    List<SCMNodeInfo> scmNodeInfos = SCMNodeInfo.buildNodeInfo(conf);
+
+    port = 9880;
+
+    int count = 1;
+    for (SCMNodeInfo scmNodeInfo : scmNodeInfos) {
+      Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId());
+      Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId());
+      Assert.assertEquals("localhost:" + ++port,
+          scmNodeInfo.getBlockClientAddress());
+      Assert.assertEquals("localhost:" + ++port,
+          scmNodeInfo.getScmSecurityAddress());
+      Assert.assertEquals("localhost:" + ++port,
+          scmNodeInfo.getScmClientAddress());
+      Assert.assertEquals("localhost:" + ++port,
+          scmNodeInfo.getScmDatanodeAddress());
+    }
+  }
+
+  @Test
+  public void testSCMHANodeInfoWithDefaultPorts() {
+    for (String nodeId : nodes) {
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost");
+    }
+
+    List<SCMNodeInfo> scmNodeInfos = SCMNodeInfo.buildNodeInfo(conf);
+
+    int count = 1;
+    for (SCMNodeInfo scmNodeInfo : scmNodeInfos) {
+      Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId());
+      Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId());
+      Assert.assertEquals("localhost:" + OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT,
+          scmNodeInfo.getBlockClientAddress());
+      Assert.assertEquals("localhost:" +
+              OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT,
+          scmNodeInfo.getScmSecurityAddress());
+      Assert.assertEquals("localhost:" + OZONE_SCM_CLIENT_PORT_DEFAULT,
+          scmNodeInfo.getScmClientAddress());
+      Assert.assertEquals("localhost:" + OZONE_SCM_DATANODE_PORT_DEFAULT,
+          scmNodeInfo.getScmDatanodeAddress());
+    }
+
+
+  }
+
+  @Test(expected = ConfigurationException.class)
+  public void testSCMHANodeInfoWithMissingSCMAddress() {
+    conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
+        scmServiceId, "scm1"), "localhost");
+    conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
+        scmServiceId, "scm1"), "localhost");
+
+    SCMNodeInfo.buildNodeInfo(conf);
+  }
+
+  @Test
+  public void testNonHAWithRestDefaults() {
+    OzoneConfiguration config = new OzoneConfiguration();
+
+    config.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
+
+    List< SCMNodeInfo > scmNodeInfos = SCMNodeInfo.buildNodeInfo(config);
+
+    Assert.assertNotNull(scmNodeInfos);
+    Assert.assertTrue(scmNodeInfos.size() == 1);
+    Assert.assertEquals("localhost:" + OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT,
+        scmNodeInfos.get(0).getBlockClientAddress());
+    Assert.assertEquals("localhost:" + OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT,
+        scmNodeInfos.get(0).getScmSecurityAddress());
+    Assert.assertEquals("localhost:" + OZONE_SCM_CLIENT_PORT_DEFAULT,
+        scmNodeInfos.get(0).getScmClientAddress());
+    Assert.assertEquals("localhost:" + OZONE_SCM_DATANODE_PORT_DEFAULT,
+        scmNodeInfos.get(0).getScmDatanodeAddress());
+  }
+
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/package-info.java
similarity index 87%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/package-info.java
index 4944017..33ee79e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,8 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
-
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ Test cases for SCM HA Config loading.
  */
+package org.apache.hadoop.hdds.scm.ha;
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
index 316c867..5d4b477 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java
@@ -55,5 +55,10 @@
    */
   TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
 
+  /**
+   * If type == SIZE the unit should be defined with this attribute.
+   */
+  StorageUnit sizeUnit() default StorageUnit.BYTES;
+
   ConfigTag[] tags();
 }
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
index 3d1d689..39dcaba 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
@@ -43,5 +43,6 @@
   S3GATEWAY,
   DATANODE,
   RECON,
-  DELETION
+  DELETION,
+  HA
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index cb34e66..325736e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -69,6 +69,7 @@
 import com.sun.jmx.mbeanserver.Introspector;
 import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate;
 import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
+import static org.apache.hadoop.hdds.utils.HAUtils.checkSecurityAndSCMHAEnabled;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import org.bouncycastle.pkcs.PKCS10CertificationRequest;
@@ -187,6 +188,7 @@
 
   public void start(OzoneConfiguration configuration) {
     setConfiguration(configuration);
+    checkSecurityAndSCMHAEnabled(conf);
     start();
   }
 
@@ -284,7 +286,7 @@
   private void startRatisForTest() throws IOException {
     String scmId = "scm-01";
     String clusterId = "clusterId";
-    datanodeStateMachine.getContainer().start(scmId);
+    datanodeStateMachine.getContainer().start(clusterId);
     MutableVolumeSet volumeSet =
         getDatanodeStateMachine().getContainer().getVolumeSet();
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index cfbec1c..55002c4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -87,7 +87,7 @@
   private final ProtocolMessageMetrics<ProtocolMessageEnum> protocolMetrics;
   private OzoneProtocolMessageDispatcher<ContainerCommandRequestProto,
       ContainerCommandResponseProto, ProtocolMessageEnum> dispatcher;
-  private String scmID;
+  private String clusterId;
   private ContainerMetrics metrics;
   private final TokenVerifier tokenVerifier;
   private final boolean isBlockTokenEnabled;
@@ -551,12 +551,12 @@
   }
 
   @Override
-  public void setScmId(String scmId) {
-    Preconditions.checkNotNull(scmId, "scmId Cannot be null");
-    if (this.scmID == null) {
-      this.scmID = scmId;
+  public void setClusterId(String clusterId) {
+    Preconditions.checkNotNull(clusterId, "clusterId Cannot be null");
+    if (this.clusterId == null) {
+      this.clusterId = clusterId;
       for (Map.Entry<ContainerType, Handler> handlerMap : handlers.entrySet()) {
-        handlerMap.getValue().setScmID(scmID);
+        handlerMap.getValue().setClusterID(clusterId);
       }
     }
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
index ee0b6bc..77145ca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
@@ -82,5 +82,5 @@
    * If scmId is not set, this will set scmId, otherwise it is a no-op.
    * @param scmId
    */
-  void setScmId(String scmId);
+  void setClusterId(String scmId);
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 4ba7572..e585234 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -48,7 +48,7 @@
   protected final ConfigurationSource conf;
   protected final ContainerSet containerSet;
   protected final VolumeSet volumeSet;
-  protected String scmID;
+  protected String clusterId;
   protected final ContainerMetrics metrics;
   protected String datanodeId;
   private Consumer<ContainerReplicaProto> icrSender;
@@ -186,8 +186,8 @@
   public abstract void deleteBlock(Container container, BlockData blockData)
       throws IOException;
 
-  public void setScmID(String scmId) {
-    this.scmID = scmId;
+  public void setClusterID(String clusterID) {
+    this.clusterId = clusterID;
   }
 
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 8434e5b..3051638 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -23,6 +23,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -31,6 +32,7 @@
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
@@ -116,6 +118,18 @@
   private final AtomicLong threadPoolNotAvailableCount;
 
   /**
+   * term of latest leader SCM, extract from SCMCommand.
+   *
+   * Only leader SCM (both latest and stale) can send out SCMCommand,
+   * which will save its term in SCMCommand. Since latest leader SCM
+   * always has the highest term, term can be used to detect SCMCommand
+   * from stale leader SCM.
+   *
+   * For non-HA mode, term of SCMCommand will be 0.
+   */
+  private Optional<Long> termOfLeaderSCM = Optional.empty();
+
+  /**
    * Starting with a 2 sec heartbeat frequency which will be updated to the
    * real HB frequency after scm registration. With this method the
    * initial registration could be significant faster.
@@ -586,6 +600,65 @@
   }
 
   /**
+   * After startup, datanode needs detect latest leader SCM before handling
+   * any SCMCommand, so that it won't be disturbed by stale leader SCM.
+   *
+   * The rule is: after majority SCMs are in HEARTBEAT state and has
+   * heard from leader SCMs (commandQueue is not empty), datanode will init
+   * termOfLeaderSCM with the max term found in commandQueue.
+   *
+   * The init process also works for non-HA mode. In that case, term of all
+   * SCMCommands will be 0.
+   */
+  private void initTermOfLeaderSCM() {
+    // only init once
+    if (termOfLeaderSCM.isPresent()) {
+      return;
+    }
+
+    AtomicInteger scmNum = new AtomicInteger(0);
+    AtomicInteger activeScmNum = new AtomicInteger(0);
+
+    getParent().getConnectionManager().getValues()
+        .forEach(endpoint -> {
+          if (endpoint.isPassive()) {
+            return;
+          }
+          scmNum.incrementAndGet();
+          if (endpoint.getState()
+              == EndpointStateMachine.EndPointStates.HEARTBEAT) {
+            activeScmNum.incrementAndGet();
+          }
+        });
+
+    // majority SCMs should be in HEARTBEAT state.
+    if (activeScmNum.get() < scmNum.get() / 2 + 1) {
+      return;
+    }
+
+    // if commandQueue is not empty, init termOfLeaderSCM
+    // with the largest term found in commandQueue
+    commandQueue.stream()
+        .mapToLong(SCMCommand::getTerm)
+        .max()
+        .ifPresent(term -> termOfLeaderSCM = Optional.of(term));
+  }
+
+  /**
+   * monotonically increase termOfLeaderSCM.
+   * Always record the latest term that has seen.
+   */
+  private void updateTermOfLeaderSCM(SCMCommand<?> command) {
+    if (!termOfLeaderSCM.isPresent()) {
+      LOG.error("should init termOfLeaderSCM before update it.");
+      return;
+    }
+
+    termOfLeaderSCM = Optional.of(
+        Long.max(termOfLeaderSCM.get(), command.getTerm()));
+  }
+
+  /**
    * Returns the next command or null if it is empty.
    *
    * @return SCMCommand or Null.
@@ -593,7 +666,26 @@
   public SCMCommand getNextCommand() {
     lock.lock();
     try {
-      return commandQueue.poll();
+      initTermOfLeaderSCM();
+      if (!termOfLeaderSCM.isPresent()) {
+        return null;      // not ready yet
+      }
+
+      while (true) {
+        SCMCommand<?> command = commandQueue.poll();
+        if (command == null) {
+          return null;
+        }
+
+        updateTermOfLeaderSCM(command);
+        if (command.getTerm() == termOfLeaderSCM.get()) {
+          return command;
+        }
+
+        LOG.warn("Detect and drop a SCMCommand {} from stale leader SCM," +
+            " stale term {}, latest term {}.",
+            command, command.getTerm(), termOfLeaderSCM.get());
+      }
     } finally {
       lock.unlock();
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 4e436c4..32dd3e3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -271,6 +271,9 @@
         DeleteBlocksCommand db = DeleteBlocksCommand
             .getFromProtobuf(
                 commandResponseProto.getDeleteBlocksCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          db.setTerm(commandResponseProto.getTerm());
+        }
         if (!db.blocksTobeDeleted().isEmpty()) {
           if (LOG.isDebugEnabled()) {
             LOG.debug(DeletedContainerBlocksSummary
@@ -284,6 +287,9 @@
         CloseContainerCommand closeContainer =
             CloseContainerCommand.getFromProtobuf(
                 commandResponseProto.getCloseContainerCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          closeContainer.setTerm(commandResponseProto.getTerm());
+        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM container close request for container {}",
               closeContainer.getContainerID());
@@ -294,6 +300,9 @@
         ReplicateContainerCommand replicateContainerCommand =
             ReplicateContainerCommand.getFromProtobuf(
                 commandResponseProto.getReplicateContainerCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          replicateContainerCommand.setTerm(commandResponseProto.getTerm());
+        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM container replicate request for container {}",
               replicateContainerCommand.getContainerID());
@@ -304,6 +313,9 @@
         DeleteContainerCommand deleteContainerCommand =
             DeleteContainerCommand.getFromProtobuf(
                 commandResponseProto.getDeleteContainerCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          deleteContainerCommand.setTerm(commandResponseProto.getTerm());
+        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM delete container request for container {}",
               deleteContainerCommand.getContainerID());
@@ -314,6 +326,9 @@
         CreatePipelineCommand createPipelineCommand =
             CreatePipelineCommand.getFromProtobuf(
                 commandResponseProto.getCreatePipelineCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          createPipelineCommand.setTerm(commandResponseProto.getTerm());
+        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM create pipeline request {}",
               createPipelineCommand.getPipelineID());
@@ -324,6 +339,9 @@
         ClosePipelineCommand closePipelineCommand =
             ClosePipelineCommand.getFromProtobuf(
                 commandResponseProto.getClosePipelineCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          closePipelineCommand.setTerm(commandResponseProto.getTerm());
+        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM close pipeline request {}",
               closePipelineCommand.getPipelineID());
@@ -334,6 +352,10 @@
         SetNodeOperationalStateCommand setNodeOperationalStateCommand =
             SetNodeOperationalStateCommand.getFromProtobuf(
                 commandResponseProto.getSetNodeOperationalStateCommandProto());
+        if (commandResponseProto.hasTerm()) {
+          setNodeOperationalStateCommand.setTerm(
+              commandResponseProto.getTerm());
+        }
         if (LOG.isDebugEnabled()) {
           LOG.debug("Received SCM set operational state command. State: {} " +
               "Expiry: {}", setNodeOperationalStateCommand.getOpState(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 6c53756..4762c78 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -109,7 +109,7 @@
           }
 
           // Start the container services after getting the version information
-          ozoneContainer.start(scmId);
+          ozoneContainer.start(clusterId);
         }
         EndpointStateMachine.EndPointStates nextState =
             rpcEndPoint.getState().getNextState();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 09d1ed0..72859e1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -793,12 +793,15 @@
         clientId, server.getId(), nextCallId(), group);
 
     RaftClientReply reply;
+    LOG.debug("Received addGroup request for pipeline {}", pipelineID);
+
     try {
       reply = server.groupManagement(request);
     } catch (Exception e) {
       throw new IOException(e.getMessage(), e);
     }
     processReply(reply);
+    LOG.info("Created group {}", pipelineID);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 3672b51..9d3810e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 
@@ -177,6 +178,7 @@
       clusterId, Logger logger) {
     File hddsRoot = hddsVolume.getHddsRootDir();
     String volumeRoot = hddsRoot.getPath();
+    File clusterDir = new File(hddsRoot, clusterId);
     File scmDir = new File(hddsRoot, scmId);
 
     try {
@@ -196,18 +198,27 @@
     } else if (hddsFiles.length == 1) {
       // DN started for first time or this is a newly added volume.
       // So we create scm directory.
-      if (!scmDir.mkdir()) {
-        logger.error("Unable to create scmDir {}", scmDir);
+      if (!clusterDir.mkdir()) {
+        logger.error("Unable to create scmDir {}", clusterDir);
         return false;
       }
       return true;
     } else if(hddsFiles.length == 2) {
-      // The files should be Version and SCM directory
       if (scmDir.exists()) {
+        String msg = "Volume " + volumeRoot +
+            " is in Inconsistent state, and contains the" +
+            "SCM Directory:" + scmDir.getAbsolutePath() +
+            " which is a older format, please upgrade the volume.";
+        logger.error(msg);
+        ExitUtil.terminate(-2, msg);
+        return false;
+      }
+      // The files should be Version and SCM directory
+      if (clusterDir.exists()) {
         return true;
       } else {
-        logger.error("Volume {} is in Inconsistent state, expected scm " +
-                "directory {} does not exist", volumeRoot, scmDir
+        logger.error("Volume {} is in Inconsistent state, expected cluster " +
+                "directory {} does not exist", volumeRoot, clusterDir
             .getAbsolutePath());
         return false;
       }
@@ -215,7 +226,7 @@
       // The hdds root dir should always have 2 files. One is Version file
       // and other is SCM directory.
       logger.error("The hdds root dir {} should always have 2 files. " +
-              "One is Version file and other is SCM directory. " +
+              "One is Version file and other is Cluster directory. " +
               "Please remove any other extra files from the directory " +
               "so that DataNode startup can proceed.",
               hddsRoot.getAbsolutePath());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 1dee1ba..afc7e62 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -52,9 +52,9 @@
  * <p>
  * The disk layout per volume is as follows:
  * <p>../hdds/VERSION
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >>/metadata}
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >>/<<dataDir>>}
  * <p>
  * Each hdds volume has its own VERSION file. The hdds volume will have one
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index b8c6067..159b38e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -138,6 +138,18 @@
     initializeVolumeSet();
   }
 
+  public MutableVolumeSet(ConfigurationSource conf) throws IOException {
+    this.datanodeUuid = null;
+    this.clusterID = null;
+    this.conf = conf;
+    this.volumeSetRWLock = new ReentrantReadWriteLock();
+    this.volumeChecker = getVolumeChecker(conf);
+    this.diskCheckerservice = null;
+    this.periodicDiskChecker = null;
+    this.usageCheckFactory = null;
+    initializeVolumeSet();
+  }
+
   public void setFailedVolumeListener(Runnable runnable) {
     failedVolumeListener = runnable;
   }
@@ -459,9 +471,16 @@
   }
 
   private void stopDiskChecker() {
-    periodicDiskChecker.cancel(true);
-    volumeChecker.shutdownAndWait(0, TimeUnit.SECONDS);
-    diskCheckerservice.shutdownNow();
+    if (periodicDiskChecker != null) {
+      periodicDiskChecker.cancel(true);
+    }
+    if (volumeChecker != null) {
+      volumeChecker.shutdownAndWait(0, TimeUnit.SECONDS);
+    }
+
+    if (diskCheckerservice != null) {
+      diskCheckerservice.shutdownNow();
+    }
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 5079811..6760a27 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -97,11 +97,11 @@
 
   @Override
   public void create(VolumeSet volumeSet, VolumeChoosingPolicy
-      volumeChoosingPolicy, String scmId) throws StorageContainerException {
+      volumeChoosingPolicy, String clusterId) throws StorageContainerException {
     Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " +
         "cannot be null");
     Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null");
-    Preconditions.checkNotNull(scmId, "scmId cannot be null");
+    Preconditions.checkNotNull(clusterId, "clusterId cannot be null");
 
     File containerMetaDataPath = null;
     //acquiring volumeset read lock
@@ -115,11 +115,11 @@
       long containerID = containerData.getContainerID();
 
       containerMetaDataPath = KeyValueContainerLocationUtil
-          .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID);
+          .getContainerMetaDataPath(hddsVolumeDir, clusterId, containerID);
       containerData.setMetadataPath(containerMetaDataPath.getPath());
 
       File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-          hddsVolumeDir, scmId, containerID);
+          hddsVolumeDir, clusterId, containerID);
 
       // Check if it is new Container.
       ContainerUtils.verifyIsNewContainer(containerMetaDataPath);
@@ -173,20 +173,20 @@
    * Set all of the path realted container data fields based on the name
    * conventions.
    *
-   * @param scmId
+   * @param clusterId
    * @param containerVolume
    * @param hddsVolumeDir
    */
-  public void populatePathFields(String scmId,
+  public void populatePathFields(String clusterId,
       HddsVolume containerVolume, String hddsVolumeDir) {
 
     long containerId = containerData.getContainerID();
 
     File containerMetaDataPath = KeyValueContainerLocationUtil
-        .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId);
+        .getContainerMetaDataPath(hddsVolumeDir, clusterId, containerId);
 
     File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath(
-        hddsVolumeDir, scmId, containerId);
+        hddsVolumeDir, clusterId, containerId);
     File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(
         containerMetaDataPath, containerId);
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 617a8a0..fe3cc5c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -259,7 +259,7 @@
     boolean created = false;
     try (AutoCloseableLock l = containerCreationLock.acquire()) {
       if (containerSet.getContainer(containerID) == null) {
-        newContainer.create(volumeSet, volumeChoosingPolicy, scmID);
+        newContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
         created = containerSet.addContainer(newContainer);
       } else {
         // The create container request for an already existing container can
@@ -288,7 +288,7 @@
       HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
           .getVolumesList(), container.getContainerData().getMaxSize());
       String hddsVolumeDir = containerVolume.getHddsRootDir().toString();
-      container.populatePathFields(scmID, containerVolume, hddsVolumeDir);
+      container.populatePathFields(clusterId, containerVolume, hddsVolumeDir);
     } finally {
       volumeSet.readUnlock();
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index 0c7a04e..ad1673a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -36,16 +36,16 @@
    * Returns Container Metadata Location.
    * @param hddsVolumeDir base dir of the hdds volume where scm directories
    *                      are stored
-   * @param scmId
+   * @param clusterId
    * @param containerId
    * @return containerMetadata Path to container metadata location where
    * .container file will be stored.
    */
   public static File getContainerMetaDataPath(String hddsVolumeDir,
-                                              String scmId,
+                                              String clusterId,
                                               long containerId) {
     String containerMetaDataPath =
-        getBaseContainerLocation(hddsVolumeDir, scmId,
+        getBaseContainerLocation(hddsVolumeDir, clusterId,
             containerId);
     containerMetaDataPath = containerMetaDataPath + File.separator +
         OzoneConsts.CONTAINER_META_PATH;
@@ -56,35 +56,36 @@
   /**
    * Returns Container Chunks Location.
    * @param baseDir
-   * @param scmId
+   * @param clusterId
    * @param containerId
    * @return chunksPath
    */
-  public static File getChunksLocationPath(String baseDir, String scmId,
+  public static File getChunksLocationPath(String baseDir, String clusterId,
                                            long containerId) {
-    String chunksPath = getBaseContainerLocation(baseDir, scmId, containerId)
-        + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS;
+    String chunksPath =
+        getBaseContainerLocation(baseDir, clusterId, containerId)
+            + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS;
     return new File(chunksPath);
   }
 
   /**
    * Returns base directory for specified container.
    * @param hddsVolumeDir
-   * @param scmId
+   * @param clusterId
    * @param containerId
    * @return base directory for container.
    */
   private static String getBaseContainerLocation(String hddsVolumeDir,
-                                                 String scmId,
+                                                 String clusterId,
                                                  long containerId) {
     Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null");
-    Preconditions.checkNotNull(scmId, "scmUuid cannot be null");
+    Preconditions.checkNotNull(clusterId, "scmUuid cannot be null");
     Preconditions.checkState(containerId >= 0,
         "Container Id cannot be negative.");
 
     String containerSubDirectory = getContainerSubDirectory(containerId);
 
-    String containerMetaDataPath = hddsVolumeDir  + File.separator + scmId +
+    String containerMetaDataPath = hddsVolumeDir  + File.separator + clusterId +
         File.separator + Storage.STORAGE_DIR_CURRENT + File.separator +
         containerSubDirectory + File.separator + containerId;
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index 20b45b1..1ab4c3b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -21,11 +21,16 @@
 import java.io.File;
 import java.io.FileFilter;
 import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -39,6 +44,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.yaml.snakeyaml.Yaml;
 
 /**
  * Class used to read .container files from Volume and build container map.
@@ -46,22 +52,22 @@
  * Layout of the container directory on disk is as follows:
  *
  * <p>../hdds/VERSION
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >/metadata/<<containerID>>.container}
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<containerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<containerID
  * >/<<dataPath>>}
  * <p>
  * Some ContainerTypes will have extra metadata other than the .container
  * file. For example, KeyValueContainer will have a .db file. This .db file
  * will also be stored in the metadata folder along with the .container file.
  * <p>
- * {@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID
+ * {@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<KVcontainerID
  * >/metadata/<<KVcontainerID>>.db}
  * <p>
  * Note that the {@literal <<dataPath>>} is dependent on the ContainerType.
  * For KeyValueContainers, the data is stored in a "chunks" folder. As such,
  * the {@literal <<dataPath>>} layout for KeyValueContainers is:
- * <p>{@literal ../hdds/<<scmUuid>>/current/<<containerDir>>/<<KVcontainerID
+ * <p>{@literal ../hdds/<<clusterUuid>>/current/<<containerDir>>/<<KVcontainerID
  * >/chunks/<<chunksFile>>}
  *
  */
@@ -74,6 +80,7 @@
   private final ConfigurationSource config;
   private final File hddsVolumeDir;
   private final MutableVolumeSet volumeSet;
+  private final boolean isInUpgradeMode;
 
   public ContainerReader(
       MutableVolumeSet volSet, HddsVolume volume, ContainerSet cset,
@@ -85,46 +92,57 @@
     this.containerSet = cset;
     this.config = conf;
     this.volumeSet = volSet;
+    this.isInUpgradeMode =
+        conf.getBoolean(ScmConfigKeys.HDDS_DATANODE_UPGRADE_LAYOUT_INLINE,
+            ScmConfigKeys.HDDS_DATANODE_UPGRADE_LAYOUT_INLINE_DEFAULT);
+    LOG.info("Running in upgrade mode:{}", this.isInUpgradeMode);
   }
 
+
+  private File getClusterDir() {
+    File hddsVolumeRootDir = hddsVolume.getHddsRootDir();
+    return new File(hddsVolumeRootDir, hddsVolume.getClusterID());
+  }
   @Override
   public void run() {
     try {
       readVolume(hddsVolumeDir);
-    } catch (RuntimeException ex) {
-      LOG.error("Caught a Run time exception during reading container files" +
-          " from Volume {} {}", hddsVolumeDir, ex);
+    } catch (Throwable t) {
+      LOG.error("Caught an exception during reading container files" +
+          " from Volume {} {}", hddsVolumeDir, t);
+      volumeSet.failVolume(hddsVolumeDir.getPath());
     }
   }
 
-  public void readVolume(File hddsVolumeRootDir) {
+  public void readVolume(File hddsVolumeRootDir) throws Exception {
     Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" +
         "cannot be null");
 
-    //filtering scm directory
-    File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() {
+    //filtering storage directory
+    File[] storageDir = hddsVolumeRootDir.listFiles(new FileFilter() {
       @Override
       public boolean accept(File pathname) {
         return pathname.isDirectory();
       }
     });
 
-    if (scmDir == null) {
+    if (storageDir == null) {
       LOG.error("IO error for the volume {}, skipped loading",
           hddsVolumeRootDir);
       volumeSet.failVolume(hddsVolumeRootDir.getPath());
       return;
     }
 
-    if (scmDir.length > 1) {
+    if (storageDir.length > 1) {
       LOG.error("Volume {} is in Inconsistent state", hddsVolumeRootDir);
       volumeSet.failVolume(hddsVolumeRootDir.getPath());
       return;
     }
 
     LOG.info("Start to verify containers on volume {}", hddsVolumeRootDir);
-    for (File scmLoc : scmDir) {
-      File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT);
+    for (File storageLoc : storageDir) {
+      File location = preProcessStorageLoc(storageLoc);
+      File currentDir = new File(location, Storage.STORAGE_DIR_CURRENT);
       File[] containerTopDirs = currentDir.listFiles();
       if (containerTopDirs != null) {
         for (File containerTopDir : containerTopDirs) {
@@ -136,7 +154,7 @@
                     containerDir);
                 long containerID = ContainerUtils.getContainerID(containerDir);
                 if (containerFile.exists()) {
-                  verifyContainerFile(containerID, containerFile);
+                  verifyContainerFile(storageLoc, containerID, containerFile);
                 } else {
                   LOG.error("Missing .container file for ContainerID: {}",
                       containerDir.getName());
@@ -150,7 +168,37 @@
     LOG.info("Finish verifying containers on volume {}", hddsVolumeRootDir);
   }
 
-  private void verifyContainerFile(long containerID, File containerFile) {
+  public File preProcessStorageLoc(File storageLoc) throws Exception {
+    File clusterDir = getClusterDir();
+
+    if (!isInUpgradeMode) {
+      Preconditions.checkArgument(clusterDir.exists(),
+          "Storage Dir:" + clusterDir + " doesn't exists");
+      Preconditions.checkArgument(storageLoc.equals(clusterDir),
+          "configured storage location path" + storageLoc +
+              " does not container the clusterId:" +
+              hddsVolume.getClusterID());
+      return storageLoc;
+    }
+
+    if (clusterDir.exists()) {
+      return storageLoc;
+    }
+
+    try {
+      LOG.info("Storage dir based on clusterId doesn't exists." +
+          "Renaming storage location:{} to {}", storageLoc, clusterDir);
+      NativeIO.renameTo(storageLoc, clusterDir);
+      return clusterDir;
+    } catch (Throwable t) {
+      LOG.error("DN Layout upgrade failed. Renaming of storage" +
+          "location:{} to {} failed", storageLoc, clusterDir, t);
+      throw t;
+    }
+  }
+
+  private void verifyContainerFile(File storageLoc, long containerID,
+                                   File containerFile) {
     try {
       ContainerData containerData = ContainerDataYaml.readContainerFile(
           containerFile);
@@ -159,7 +207,7 @@
             "Skipping loading of this container.", containerFile);
         return;
       }
-      verifyAndFixupContainerData(containerData);
+      verifyAndFixupContainerData(storageLoc, containerData);
     } catch (IOException ex) {
       LOG.error("Failed to parse ContainerFile for ContainerID: {}",
           containerID, ex);
@@ -167,25 +215,60 @@
   }
 
   /**
+   * This function upgrades the container layout in following steps.
+   * a) Converts the chunk and metadata path to the new clusterID
+   *    based location.
+   * b) Re-computes the new container checksum.
+   * b) Persists the new container layout to disk.
+   * @param storageLoc
+   * @param kvContainerData
+   * @return upgraded KeyValueContainer
+   * @throws IOException
+   */
+  public KeyValueContainer upgradeContainerLayout(File storageLoc,
+      KeyValueContainerData kvContainerData) throws IOException {
+    kvContainerData.setMetadataPath(
+        findNormalizedPath(storageLoc,
+            kvContainerData.getMetadataPath()));
+    kvContainerData.setChunksPath(
+        findNormalizedPath(storageLoc,
+            kvContainerData.getChunksPath()));
+
+    Yaml yaml = ContainerDataYaml.getYamlForContainerType(
+        kvContainerData.getContainerType());
+    kvContainerData.computeAndSetChecksum(yaml);
+
+    KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
+    KeyValueContainer kvContainer = new KeyValueContainer(
+        kvContainerData, config);
+    kvContainer.update(Collections.emptyMap(), true);
+    return kvContainer;
+  }
+
+  /**
    * verify ContainerData loaded from disk and fix-up stale members.
    * Specifically blockCommitSequenceId, delete related metadata
    * and bytesUsed
    * @param containerData
    * @throws IOException
    */
-  public void verifyAndFixupContainerData(ContainerData containerData)
-      throws IOException {
+  public void verifyAndFixupContainerData(File storageLoc,
+      ContainerData containerData) throws IOException {
     switch (containerData.getContainerType()) {
     case KeyValueContainer:
       if (containerData instanceof KeyValueContainerData) {
         KeyValueContainerData kvContainerData = (KeyValueContainerData)
             containerData;
         containerData.setVolume(hddsVolume);
-
-        KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
-        KeyValueContainer kvContainer = new KeyValueContainer(
-            kvContainerData, config);
-
+        KeyValueContainer kvContainer = null;
+        if (isInUpgradeMode) {
+          kvContainer =
+              upgradeContainerLayout(storageLoc, kvContainerData);
+        } else {
+          KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
+          kvContainer = new KeyValueContainer(
+              kvContainerData, config);
+        }
         containerSet.addContainer(kvContainer);
       } else {
         throw new StorageContainerException("Container File is corrupted. " +
@@ -200,4 +283,18 @@
           ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE);
     }
   }
+
+  public String findNormalizedPath(File storageLoc, String path) {
+    Path p = Paths.get(path);
+    Path relativePath = storageLoc.toPath().relativize(p);
+    Path newPath = getClusterDir().toPath().resolve(relativePath);
+
+    if (!isInUpgradeMode) {
+      Preconditions.checkArgument(newPath.toFile().exists(),
+          "modified path:" + newPath + " doesn't exists");
+    }
+
+    LOG.debug("new Normalized Path is:{}", newPath);
+    return newPath.toAbsolutePath().toString();
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 3ecddac..8b37c5b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -86,6 +87,7 @@
   private List<ContainerDataScanner> dataScanners;
   private final BlockDeletingService blockDeletingService;
   private final GrpcTlsConfig tlsClientConfig;
+  private final AtomicBoolean isStarted;
   private final ReplicationServer replicationServer;
   private DatanodeDetails datanodeDetails;
 
@@ -110,7 +112,7 @@
     containerSet = new ContainerSet();
     metadataScanner = null;
 
-    buildContainerSet();
+    buildContainerSet(volumeSet, containerSet, config);
     final ContainerMetrics metrics = ContainerMetrics.create(conf);
     handlers = Maps.newHashMap();
 
@@ -167,6 +169,8 @@
             TimeUnit.MILLISECONDS, config);
     tlsClientConfig = RatisHelper.createTlsClientConfig(
         secConf, certClient != null ? certClient.getCACertificate() : null);
+
+    isStarted = new AtomicBoolean(false);
   }
 
   public GrpcTlsConfig getTlsClientConfig() {
@@ -178,7 +182,8 @@
   /**
    * Build's container map.
    */
-  private void buildContainerSet() {
+  public static void buildContainerSet(MutableVolumeSet volumeSet,
+        ContainerSet containerSet, ConfigurationSource config) {
     Iterator<HddsVolume> volumeSetIterator = volumeSet.getVolumesList()
         .iterator();
     ArrayList<Thread> volumeThreads = new ArrayList<>();
@@ -251,7 +256,11 @@
    *
    * @throws IOException
    */
-  public void start(String scmId) throws IOException {
+  public void start(String clusterId) throws IOException {
+    if (!isStarted.compareAndSet(false, true)) {
+      LOG.info("Ignore. OzoneContainer already started.");
+      return;
+    }
     LOG.info("Attempting to start container services.");
     startContainerScrub();
 
@@ -261,7 +270,7 @@
     writeChannel.start();
     readChannel.start();
     hddsDispatcher.init();
-    hddsDispatcher.setScmId(scmId);
+    hddsDispatcher.setClusterId(clusterId);
     blockDeletingService.start();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
index e1eae72..b7e26c0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
@@ -30,7 +30,11 @@
  */
 public abstract class SCMCommand<T extends GeneratedMessage> implements
     IdentifiableEventPayload {
-  private long id;
+  private final long id;
+
+  // If running upon Ratis, holds term of underlying RaftServer iff current
+  // SCM is a leader. If running without Ratis, holds SCMContext.INVALID_TERM.
+  private long term;
 
   SCMCommand() {
     this.id = HddsIdFactory.getLongId();
@@ -60,4 +64,18 @@
     return id;
   }
 
+  /**
+   * Get term of this command.
+   * @return term
+   */
+  public long getTerm() {
+    return term;
+  }
+
+  /**
+   * Set term of this command.
+   */
+  public void setTerm(long term) {
+    this.term = term;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 534f9ef..3e969ac 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -342,19 +342,11 @@
     scmCommandRequests.add(scmCmd);
   }
 
-  /**
-   * Set scmId.
-   * @param id
-   */
-  public void setScmId(String id) {
-    this.scmId = id;
+  public String getClusterId() {
+    return clusterId;
   }
 
-  /**
-   * Set scmId.
-   * @return scmId
-   */
-  public String getScmId() {
-    return scmId;
+  public void setClusterId(String clusterId) {
+    this.clusterId = clusterId;
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index bdc5a42..34f26a7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -130,7 +130,7 @@
       }
       HddsDispatcher hddsDispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
-      hddsDispatcher.setScmId(scmId.toString());
+      hddsDispatcher.setClusterId(scmId.toString());
       ContainerCommandResponseProto responseOne = hddsDispatcher
           .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
@@ -294,7 +294,7 @@
 
     HddsDispatcher hddsDispatcher = new HddsDispatcher(
         conf, containerSet, volumeSet, handlers, context, metrics, null);
-    hddsDispatcher.setScmId(scmId.toString());
+    hddsDispatcher.setClusterId(scmId.toString());
     return hddsDispatcher;
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 434e93a..04fa832 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -372,7 +372,7 @@
       final KeyValueHandler kvHandler = new KeyValueHandler(conf,
           UUID.randomUUID().toString(), containerSet, volumeSet, metrics,
           c -> icrReceived.incrementAndGet());
-      kvHandler.setScmID(UUID.randomUUID().toString());
+      kvHandler.setClusterID(UUID.randomUUID().toString());
 
       final ContainerCommandRequestProto createContainer =
           ContainerCommandRequestProto.newBuilder()
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 7855b69..be261d0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -70,7 +70,7 @@
 
   private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
   private UUID datanodeId;
-  private String scmId = UUID.randomUUID().toString();
+  private String clusterId = UUID.randomUUID().toString();
   private int blockCount = 10;
   private long blockLen = 1024;
 
@@ -85,7 +85,7 @@
     datanodeId = UUID.randomUUID();
     hddsVolume = new HddsVolume.Builder(volumeDir
         .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId
-        .toString()).build();
+        .toString()).clusterID(clusterId).build();
 
     volumeSet = mock(MutableVolumeSet.class);
     volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
@@ -101,7 +101,7 @@
       KeyValueContainer keyValueContainer =
           new KeyValueContainer(keyValueContainerData,
               conf);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+      keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
 
 
       List<Long> blkNames;
@@ -225,7 +225,7 @@
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
         datanodeDirs.toString());
     MutableVolumeSet volumeSets =
-        new MutableVolumeSet(datanodeId.toString(), conf);
+        new MutableVolumeSet(datanodeId.toString(), clusterId, conf);
     ContainerCache cache = ContainerCache.getInstance(conf);
     cache.clear();
 
@@ -243,7 +243,7 @@
       KeyValueContainer keyValueContainer =
           new KeyValueContainer(keyValueContainerData,
               conf);
-      keyValueContainer.create(volumeSets, policy, scmId);
+      keyValueContainer.create(volumeSets, policy, clusterId);
 
       List<Long> blkNames;
       if (i % 2 == 0) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 96a83a7..96bde76 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -76,7 +76,7 @@
   public TemporaryFolder folder = new TemporaryFolder();
 
   private OzoneConfiguration conf;
-  private String scmId = UUID.randomUUID().toString();
+  private String clusterId = UUID.randomUUID().toString();
   private MutableVolumeSet volumeSet;
   private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
   private KeyValueContainerData keyValueContainerData;
@@ -118,9 +118,10 @@
 
   @Test
   public void testBuildContainerMap() throws Exception {
+
     // Format the volumes
     for (HddsVolume volume : volumeSet.getVolumesList()) {
-      volume.format(UUID.randomUUID().toString());
+      volume.format(clusterId);
       commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0));
     }
 
@@ -138,7 +139,7 @@
           datanodeDetails.getUuidString());
       keyValueContainer = new KeyValueContainer(
           keyValueContainerData, conf);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+      keyValueContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
       myVolume = keyValueContainer.getContainerData().getVolume();
 
       freeBytes = addBlocks(keyValueContainer, 2, 3);
@@ -234,7 +235,8 @@
     // we expect an out of space Exception
     StorageContainerException e = LambdaTestUtils.intercept(
         StorageContainerException.class,
-        () -> keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId)
+        () -> keyValueContainer.
+            create(volumeSet, volumeChoosingPolicy, clusterId)
     );
     if (!DISK_OUT_OF_SPACE.equals(e.getResult())) {
       LOG.info("Unexpected error during container creation", e);
diff --git a/hadoop-hdds/docs/content/design/scmha.md b/hadoop-hdds/docs/content/design/scmha.md
index 5b153af..46acffb 100644
--- a/hadoop-hdds/docs/content/design/scmha.md
+++ b/hadoop-hdds/docs/content/design/scmha.md
@@ -4,7 +4,7 @@
 date: 2020-03-05
 jira: HDDS-2823
 status: implementing
-author: Li Cheng, Nandakumar Vadivelu
+author: Li Cheng, Nandakumar Vadivelu, Rui Wang, Glen Geng, Shashikant Banerjee
 ---
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,6 +24,15 @@
 
  Proposal to implement HA similar to the OM HA: Using Apache Ratis to propagate the 
  
-# Link
+# Links
 
- * https://docs.google.com/document/d/1vr_z6mQgtS1dtI0nANoJlzvF1oLV-AtnNJnxAgg69rM/edit?usp=sharing
+The main SCM HA design doc is available from [here](https://docs.google.com/document/d/1vr_z6mQgtS1dtI0nANoJlzvF1oLV-AtnNJnxAgg69rM/edit?usp=sharing)
+
+During the implementation of SCM-HA many smaller design docs are created specific to various areas:
+
+ * [SCM HA Distributed Sequence ID Generator](https://docs.google.com/document/d/1LaXz_mjeXPmIKys3oogxQSDLVQOzewpIp3baPGT0Vqw/edit): about generating unique identifier across multiple nodes of the HA quorum
+ * [SCM HA Service Manager](https://docs.google.com/document/d/1DbbqP0m3g_iEpY9qkSGOuQgcCN-QqlSNgWpvBOLv5h0/edit): about starting and stopping the main SCM services (like PipelienManager, ReplicationManager) in case of a failover
+ * [SCM HA SCMContext](https://docs.google.com/document/d/1h_3gpC4o2EpuBlcQiJC7MMoZz9JmaMX9CxObSxWU614/edit): about using a helper object which includes all the key information for all the required service components
+ * [SCM HA Snapshots](https://docs.google.com/document/d/1uy4_ER2V6nNQJ7_5455Wz8NmI142JHPnif6Y1OdPi8E/edit): about RAFT state-machine snapshots
+ * [SCM HA: DeleteBlockLog](https://docs.google.com/document/d/166Aea2EowSGWtAFWNlDv0gu4rA06dQ2rJAsBd-l210Q/edit): about coordinating block deletions in HA environment
+ * [SCM HA: bootstrap](https://issues.apache.org/jira/secure/attachment/13021254/SCM%20HA%20Bootstrap_updated.pdf): about initializing the SCM HA cluster
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/feature/HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md
similarity index 78%
rename from hadoop-hdds/docs/content/feature/HA.md
rename to hadoop-hdds/docs/content/feature/OM-HA.md
index 3f8ad53..1da660c 100644
--- a/hadoop-hdds/docs/content/feature/HA.md
+++ b/hadoop-hdds/docs/content/feature/OM-HA.md
@@ -1,10 +1,10 @@
 ---
-title: "High Availability"
+title: "OM High Availability"
 weight: 1
 menu:
    main:
       parent: Features
-summary: HA setup for Ozone to avoid any single point of failure.
+summary: HA setup for Ozone Manager to avoid any single point of failure.
 ---
 <!---
   Licensed to the Apache Software Foundation (ASF) under one or more
@@ -23,19 +23,19 @@
   limitations under the License.
 -->
 
-Ozone has two leader nodes (*Ozone Manager* for key space management and *Storage Container Management* for block space management) and storage nodes (Datanode). Data is replicated between datanodes with the help of RAFT consensus algorithm.
+Ozone has two metadata-manager nodes (*Ozone Manager* for key space management and *Storage Container Management* for block space management) and multiple storage nodes (Datanode). Data is replicated between Datanodes with the help of RAFT consensus algorithm.
 
-To avoid any single point of failure the leader nodes also should have a HA setup.
+To avoid any single point of failure the metadata-manager nodes also should have a HA setup.
 
- 1. HA of Ozone Manager is implemented with the help of RAFT (Apache Ratis)
- 2. HA of Storage Container Manager is [under implementation]({{< ref "scmha.md">}})
+Both Ozone Manager and Storage Container Manager supports HA. In this mode the internal state is replicated via RAFT (with Apache Ratis) 
+
+This document explain the HA setup of Ozone Manager (OM) HA, please check [this page]({{< ref "SCM-HA" >}}) for SCM HA.  While they can be setup for HA independently, a reliable, full HA setup requires enabling HA for both services.
 
 ## Ozone Manager HA
 
-A single Ozone Manager uses [RocksDB](https://github.com/facebook/rocksdb/) to persiste metadata (volumes, buckets, keys) locally. HA version of Ozone Manager does exactly the same but all the data is replicated with the help of the RAFT consensus algorithm to follower Ozone Manager instances.
+A single Ozone Manager uses [RocksDB](https://github.com/facebook/rocksdb/) to persist metadata (volumes, buckets, keys) locally. HA version of Ozone Manager does exactly the same but all the data is replicated with the help of the RAFT consensus algorithm to follower Ozone Manager instances.
 
 ![OM HA](HA-OM.png)
-
 Client connects to the Leader Ozone Manager which process the request and schedule the replication with RAFT. When the request is replicated to all the followers the leader can return with the response.
 
 ## Configuration
@@ -112,4 +112,4 @@
 ## References
 
  * Check [this page]({{< ref "design/omha.md" >}}) for the links to the original design docs
- * Ozone distribution contains an example OM HA configuration, under the `compose/ozone-om-ha` directory which can be tested with the help of [docker-compose]({{< ref "start/RunningViaDocker.md" >}}).
\ No newline at end of file
+ * Ozone distribution contains an example OM HA configuration, under the `compose/ozone-om-ha` directory which can be tested with the help of [docker-compose]({{< ref "start/RunningViaDocker.md" >}}).
diff --git a/hadoop-hdds/docs/content/feature/HA.zh.md b/hadoop-hdds/docs/content/feature/OM-HA.zh.md
similarity index 100%
rename from hadoop-hdds/docs/content/feature/HA.zh.md
rename to hadoop-hdds/docs/content/feature/OM-HA.zh.md
diff --git a/hadoop-hdds/docs/content/feature/SCM-HA.md b/hadoop-hdds/docs/content/feature/SCM-HA.md
new file mode 100644
index 0000000..4a84aa9
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/SCM-HA.md
@@ -0,0 +1,162 @@
+---
+title: "SCM High Availability"
+weight: 1
+menu:
+   main:
+      parent: Features
+summary: HA setup for Storage Container Manager to avoid any single point of failure.
+---
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+Ozone has two metadata-manager nodes (*Ozone Manager* for key space management and *Storage Container Management* for block space management) and multiple storage nodes (Datanode). Data is replicated between Datanodes with the help of RAFT consensus algorithm.
+
+<div class="alert alert-warning" role="alert">
+Please note that SCM-HA is not ready for production in secure environments. Security work is in progress and will be finished soon.
+</div>
+
+To avoid any single point of failure the metadata-manager nodes also should have a HA setup.
+
+Both Ozone Manager and Storage Container Manager supports HA. In this mode the internal state is replicated via RAFT (with Apache Ratis) 
+
+This document explains the HA setup of Storage Container Manager (SCM), please check [this page]({{< ref "OM-HA" >}}) for HA setup of Ozone Manager (OM). While they can be setup for HA independently, a reliable, full HA setup requires enabling HA for both services. 
+
+## Configuration
+
+HA mode of Storage Container Manager can be enabled with the following settings in `ozone-site.xml`:
+
+```XML
+<property>
+   <name>ozone.scm.ratis.enable</name>
+   <value>true</value>
+</property>
+```
+One Ozone configuration (`ozone-site.xml`) can support multiple SCM HA node set, multiple Ozone clusters. To select between the available SCM nodes a logical name is required for each of the clusters which can be resolved to the IP addresses (and domain names) of the Storage Container Managers.
+
+This logical name is called `serviceId` and can be configured in the `ozone-site.xml`
+
+Most of the time you need to set only the values of your current cluster:
+
+ ```XML
+<property>
+   <name>ozone.scm.service.ids</name>
+   <value>cluster1</value>
+</property>
+```
+
+For each of the defined `serviceId` a logical configuration name should be defined for each of the servers
+
+```XML
+<property>
+   <name>ozone.scm.nodes.cluster1</name>
+   <value>scm1,scm2,scm3</value>
+</property>
+```
+
+The defined prefixes can be used to define the address of each of the SCM services:
+
+```XML
+<property>
+   <name>ozone.scm.address.cluster1.scm1</name>
+   <value>host1</value>
+</property>
+<property>
+   <name>ozone.scm.address.cluster1.scm1</name>
+   <value>host2</value>
+</property>
+<property>
+   <name>ozone.scm.address.cluster1.scm1</name>
+   <value>host3</value>
+</property>
+```
+
+For reliable HA support choose 3 independent nodes to form a quorum. 
+
+## Bootstrap
+
+The initialization of the **first** SCM-HA node is the same as a none-HA SCM:
+
+```
+bin/ozone scm --init
+```
+
+Second and third nodes should be *bootstrapped* instead of init. These clusters will join to the configured RAFT quorum. The id of the current server is identified by DNS name or can be set explicitly by `ozone.scm.node.id`. Most of the time you don't need to set it as DNS based id detection can work well.
+
+```
+bin/ozone scm --bootstrap
+```
+
+## Auto-bootstrap
+
+In some environment -- such as containerized / K8s environment -- we need to have a common, unified way to initialize SCM HA quorum. As a remained, the standard initialization flow is the following:
+
+ 1. On the first, "primordial" node, call `scm --init`
+ 2. On second/third nodes call `scm --bootstrap`
+
+This can be changed with using `ozone.scm.primordial.node.id`. You can define the primordial node. After setting this node, you should execute **both** `scm --init` and `scm --bootstrap` on **all** nodes.
+
+Based on the `ozone.scm.primordial.node.id`, the init process will be ignored on the second/third nodes and bootstrap process will be ignored on all nodes except the primordial one.
+
+## Implementation details
+
+SCM HA uses Apache Ratis to replicate state between the members of the SCM HA quorum. Each node maintains the block management metadata in local RocksDB.
+
+This replication process is a simpler version of OM HA replication process as it doesn't use any double buffer (as the overall db thourghput of SCM requests are lower)
+
+Datanodes are sending all the reports (Container reports, Pipeline reports...) to *all* the Datanodes parallel. Only the leader node can assign/create new containers, and only the leader node sends command back to the Datanodes.
+
+## Verify SCM HA setup
+
+After starting an SCM-HA it can be validated if the SCM nodes are forming one single quorum instead of 3 individual SCM nodes.
+
+First, check if all the SCM nodes store the same ClusterId metadata:
+
+```bash
+cat /data/metadata/scm/current/VERSION
+```
+
+ClusterId is included in the VERSION file and should be the same in all the SCM nodes:
+
+```bash
+#Tue Mar 16 10:19:33 UTC 2021
+cTime=1615889973116
+clusterID=CID-130fb246-1717-4313-9b62-9ddfe1bcb2e7
+nodeType=SCM
+scmUuid=e6877ce5-56cd-4f0b-ad60-4c8ef9000882
+layoutVersion=0
+```
+
+You can also create data and double check with `ozone debug` tool if all the container metadata is replicated.
+
+```shell
+bin/ozone freon randomkeys --numOfVolumes=1 --numOfBuckets=1 --numOfKeys=10000 --keySize=524288 --replicationType=RATIS --numOfThreads=8 --factor=THREE --bufferSize=1048576
+ 
+ 
+// use debug ldb to check scm db on all the machines
+bin/ozone debug ldb --db=/tmp/metadata/scm.db/ ls
+ 
+ 
+bin/ozone debug ldb --db=/tmp/metadata/scm.db/ scan --with-keys --column_family=containers
+```
+
+## Migrating from existing SCM
+
+SCM HA can be turned on on any Ozone cluster. First enable Ratis (`ozone.scm.ratis.enable`) and configure only one node for the Ratis ring (`ozone.scm.nodes.NAME` should have one element).
+
+Start the cluster and test if it works well.
+
+If everything is fine, you can extend the cluster configuration with multiple nodes, restart SCM node, and initialize the additional nodes with `scm --bootstrap` command.
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
index 52dc033..778a6d4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -62,6 +63,18 @@
   String getOMCertificate(OzoneManagerDetailsProto omDetails,
       String certSignReq) throws IOException;
 
+
+  /**
+   * Get signed certificate for SCM.
+   *
+   * @param scmNodeDetails  - SCM Node Details.
+   * @param certSignReq     - Certificate signing request.
+   * @return String         - pem encoded SCM signed
+   *                          certificate.
+   */
+  String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails,
+      String certSignReq) throws IOException;
+
   /**
    * Get SCM signed certificate for given certificate serial id if it exists.
    * Throws exception if it's not found.
@@ -92,4 +105,31 @@
   List<String> listCertificate(HddsProtos.NodeType type, long startSerialId,
       int count, boolean isRevoked) throws IOException;
 
+  /**
+   * Get Root CA certificate.
+   * @return
+   * @throws IOException
+   */
+  String getRootCACertificate() throws IOException;
+
+  /**
+   * Returns all the individual SCM CA's along with Root CA.
+   *
+   * For example 3 nodes SCM HA cluster, the output will be
+   *
+   * SCM1 CA
+   * SCM2 CA
+   * SCM3 CA
+   * Root CA
+   * @return list of CA's
+   *
+   * For example on non-HA cluster the output will be SCM CA and Root CA.
+   *
+   * SCM CA
+   * Root CA
+   *
+   * @throws IOException
+   */
+  List<String> listCACertificate() throws IOException;
+
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
index aeef50e..0ab5d7e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java
@@ -21,21 +21,28 @@
 import java.util.List;
 import java.util.function.Consumer;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetSCMCertRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMListCACertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMListCertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type;
+import org.apache.hadoop.hdds.scm.proxy.SCMSecurityProtocolFailoverProxyProvider;
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
@@ -56,12 +63,22 @@
    */
   private static final RpcController NULL_RPC_CONTROLLER = null;
   private final SCMSecurityProtocolPB rpcProxy;
+  private SCMSecurityProtocolFailoverProxyProvider failoverProxyProvider;
 
   public SCMSecurityProtocolClientSideTranslatorPB(
       SCMSecurityProtocolPB rpcProxy) {
     this.rpcProxy = rpcProxy;
   }
 
+  public SCMSecurityProtocolClientSideTranslatorPB(
+      SCMSecurityProtocolFailoverProxyProvider proxyProvider) {
+    Preconditions.checkState(proxyProvider != null);
+    this.failoverProxyProvider = proxyProvider;
+    this.rpcProxy = (SCMSecurityProtocolPB) RetryProxy.create(
+        SCMSecurityProtocolPB.class, failoverProxyProvider,
+        failoverProxyProvider.getRetryPolicy());
+  }
+
   /**
    * Helper method to wrap the request and send the message.
    */
@@ -78,6 +95,9 @@
       SCMSecurityRequest wrapper = builder.build();
 
       response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+
+      handleError(response);
+
     } catch (ServiceException ex) {
       throw ProtobufHelper.getRemoteException(ex);
     }
@@ -85,6 +105,20 @@
   }
 
   /**
+   * If response is not successful, throw exception.
+   * @param resp - SCMSecurityResponse
+   * @return if response is success, return response, else throw exception.
+   * @throws SCMSecurityException
+   */
+  private SCMSecurityResponse handleError(SCMSecurityResponse resp)
+      throws SCMSecurityException {
+    if (resp.getStatus() != SCMSecurityProtocolProtos.Status.OK) {
+      throw new SCMSecurityException(resp.getMessage(),
+          SCMSecurityException.ErrorCode.values()[resp.getStatus().ordinal()]);
+    }
+    return resp;
+  }
+  /**
    * Closes this stream and releases any system resources associated
    * with it. If the stream is already closed then invoking this
    * method has no effect.
@@ -130,6 +164,41 @@
   }
 
   /**
+   * Get signed certificate for SCM node.
+   *
+   * @param scmNodeDetails  - SCM Node Details.
+   * @param certSignReq     - Certificate signing request.
+   * @return String         - pem encoded SCM signed
+   *                          certificate.
+   */
+  public String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails,
+      String certSignReq) throws IOException {
+    return getSCMCertChain(scmNodeDetails, certSignReq).getX509Certificate();
+  }
+
+
+  /**
+   * Get signed certificate for SCM node and root CA certificate.
+   *
+   * @param scmNodeDetails   - SCM Node Details.
+   * @param certSignReq      - Certificate signing request.
+   * @return SCMGetCertResponseProto  - SCMGetCertResponseProto which holds
+   * signed certificate and root CA certificate.
+   */
+  public SCMGetCertResponseProto getSCMCertChain(
+      ScmNodeDetailsProto scmNodeDetails, String certSignReq)
+      throws IOException {
+    SCMGetSCMCertRequestProto request =
+        SCMGetSCMCertRequestProto.newBuilder()
+            .setCSR(certSignReq)
+            .setScmDetails(scmNodeDetails)
+            .build();
+    return submitRequest(Type.GetSCMCertificate,
+        builder -> builder.setGetSCMCertificateRequest(request))
+        .getGetCertResponseProto();
+  }
+
+  /**
    * Get SCM signed certificate for OM.
    *
    * @param omDetails   - OzoneManager Details.
@@ -228,6 +297,24 @@
         .getListCertificateResponseProto().getCertificatesList();
   }
 
+  @Override
+  public String getRootCACertificate() throws IOException {
+    SCMGetCACertificateRequestProto protoIns = SCMGetCACertificateRequestProto
+        .getDefaultInstance();
+    return submitRequest(Type.GetCACertificate,
+        builder -> builder.setGetCACertificateRequest(protoIns))
+        .getGetCertResponseProto().getX509RootCACertificate();
+  }
+
+  @Override
+  public List<String> listCACertificate() throws IOException {
+    SCMListCACertificateRequestProto proto =
+        SCMListCACertificateRequestProto.getDefaultInstance();
+    return submitRequest(Type.ListCACertificate,
+        builder -> builder.setListCACertificateRequestProto(proto))
+        .getListCertificateResponseProto().getCertificatesList();
+  }
+
   /**
    * Return the proxy object underlying this protocol translator.
    *
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/DBTransactionBuffer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/DBTransactionBuffer.java
new file mode 100644
index 0000000..ef17470
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/DBTransactionBuffer.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.metadata;
+
+import org.apache.hadoop.hdds.utils.db.Table;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * DB transaction that abstracts the updates to the underlying datastore.
+ */
+public interface DBTransactionBuffer extends Closeable {
+
+  <KEY, VALUE> void addToBuffer(Table<KEY, VALUE> table, KEY key, VALUE value)
+      throws IOException;
+
+  <KEY, VALUE> void removeFromBuffer(Table<KEY, VALUE> table, KEY key)
+      throws IOException;
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java
new file mode 100644
index 0000000..aeed57c
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.metadata;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * TODO: Add javadoc.
+ */
+@Inherited
+@Target(ElementType.METHOD)
+@Retention(RetentionPolicy.RUNTIME)
+public @interface Replicate {
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBTransactionBufferImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBTransactionBufferImpl.java
new file mode 100644
index 0000000..92777fb
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBTransactionBufferImpl.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.metadata;
+
+import org.apache.hadoop.hdds.utils.db.Table;
+
+import java.io.IOException;
+
+/**
+ * Default implementation for DBTransactionBuffer for SCM without Ratis.
+ */
+public class SCMDBTransactionBufferImpl implements DBTransactionBuffer {
+
+  public SCMDBTransactionBufferImpl() {
+
+  }
+
+  @Override
+  public <KEY, VALUE> void addToBuffer(
+      Table<KEY, VALUE> table, KEY key, VALUE value) throws IOException {
+    table.put(key, value);
+  }
+
+  @Override
+  public <KEY, VALUE>void removeFromBuffer(Table<KEY, VALUE> table, KEY key)
+      throws IOException {
+    table.delete(key);
+  }
+
+  @Override
+  public void close() {
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
similarity index 89%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
rename to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
index 03d928c..c08f7ac 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.utils.DBStoreHAManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
@@ -41,7 +42,7 @@
  * This is similar to the OMMetadataStore class,
  * where we write classes into some underlying storage system.
  */
-public interface SCMMetadataStore {
+public interface SCMMetadataStore extends DBStoreHAManager {
   /**
    * Start metadata manager.
    *
@@ -77,6 +78,15 @@
    */
   Table<BigInteger, X509Certificate> getValidCertsTable();
 
+
+  /**
+   * A table that maintains all the valid certificates of SCM nodes issued by
+   * the SCM CA.
+   *
+   * @return Table
+   */
+  Table<BigInteger, X509Certificate> getValidSCMCertsTable();
+
   /**
    * A Table that maintains all revoked certificates until they expire.
    *
@@ -121,4 +131,9 @@
    * Table that maintains all the container information.
    */
   Table<ContainerID, ContainerInfo> getContainerTable();
+
+  /**
+   * Table that maintains sequence id information.
+   */
+  Table<String, Long> getSequenceIdTable();
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
similarity index 61%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
index 4944017..3dcf08b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
@@ -1,13 +1,13 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
+ * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
+ * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
+ *  with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *      http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,8 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
-
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ * Metadata specific package utility for SCM.
  */
+package org.apache.hadoop.hdds.scm.metadata;
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index 9c9ba50..9137c6e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.protocol;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.security.KerberosInfo;
@@ -78,6 +79,11 @@
   ScmInfo getScmInfo() throws IOException;
 
   /**
+   * Request to add SCM instance to HA group.
+   */
+  boolean addSCM(AddSCMRequest request) throws IOException;
+
+  /**
    * Sort datanodes with distance to client.
    * @param nodes list of network name of each node.
    * @param clientMachine client address, depends, can be hostname or ipaddress.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index c7989c3..0662a81 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type;
@@ -39,16 +40,18 @@
     .SortDatanodesRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .SortDatanodesResponseProto;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 
@@ -74,15 +77,21 @@
   private static final RpcController NULL_RPC_CONTROLLER = null;
 
   private final ScmBlockLocationProtocolPB rpcProxy;
+  private SCMBlockLocationFailoverProxyProvider failoverProxyProvider;
 
   /**
    * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
    *
-   * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
+   * @param proxyProvider {@link SCMBlockLocationFailoverProxyProvider}
+   * failover proxy provider.
    */
   public ScmBlockLocationProtocolClientSideTranslatorPB(
-      ScmBlockLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
+      SCMBlockLocationFailoverProxyProvider proxyProvider) {
+    Preconditions.checkState(proxyProvider != null);
+    this.failoverProxyProvider = proxyProvider;
+    this.rpcProxy = (ScmBlockLocationProtocolPB) RetryProxy.create(
+        ScmBlockLocationProtocolPB.class, failoverProxyProvider,
+        failoverProxyProvider.getSCMBlockLocationRetryPolicy(null));
   }
 
   /**
@@ -107,6 +116,11 @@
     try {
       SCMBlockLocationResponse response =
           rpcProxy.send(NULL_RPC_CONTROLLER, req);
+      if (response.getStatus() ==
+          ScmBlockLocationProtocolProtos.Status.SCM_NOT_LEADER) {
+        failoverProxyProvider
+            .performFailoverToAssignedLeader(response.getLeaderSCMNodeId());
+      }
       return response;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -235,6 +249,26 @@
   }
 
   /**
+   * Request to add SCM to existing SCM HA group.
+   * @return status
+   * @throws IOException
+   */
+  @Override
+  public boolean addSCM(AddSCMRequest request) throws IOException {
+    HddsProtos.AddScmRequestProto requestProto =
+        request.getProtobuf();
+    HddsProtos.AddScmResponseProto resp;
+    SCMBlockLocationRequest wrapper = createSCMBlockRequest(
+        Type.AddScm)
+        .setAddScmRequestProto(requestProto)
+        .build();
+
+    final SCMBlockLocationResponse wrappedResponse =
+        handleError(submitRequest(wrapper));
+    resp = wrappedResponse.getAddScmResponse();
+    return resp.getSuccess();
+  }
+  /**
    * Sort the datanodes based on distance from client.
    * @return List<DatanodeDetails></>
    * @throws IOException
@@ -269,7 +303,7 @@
   }
 
   @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
+  public void close() throws IOException {
+    failoverProxyProvider.close();
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 67c2e02..75d5864 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -73,7 +73,9 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
@@ -103,15 +105,21 @@
   private static final RpcController NULL_RPC_CONTROLLER = null;
 
   private final StorageContainerLocationProtocolPB rpcProxy;
+  private final SCMContainerLocationFailoverProxyProvider fpp;
 
   /**
    * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
    *
-   * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
+   * @param proxyProvider {@link SCMContainerLocationFailoverProxyProvider}
    */
   public StorageContainerLocationProtocolClientSideTranslatorPB(
-      StorageContainerLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
+      SCMContainerLocationFailoverProxyProvider proxyProvider) {
+    Preconditions.checkNotNull(proxyProvider);
+    this.fpp = proxyProvider;
+    this.rpcProxy = (StorageContainerLocationProtocolPB) RetryProxy.create(
+        StorageContainerLocationProtocolPB.class,
+        fpp,
+        fpp.getRetryPolicy());
   }
 
   /**
@@ -122,7 +130,6 @@
       Consumer<Builder> builderConsumer) throws IOException {
     final ScmContainerLocationResponse response;
     try {
-
       Builder builder = ScmContainerLocationRequest.newBuilder()
           .setCmdType(type)
           .setVersion(CURRENT_VERSION)
@@ -139,7 +146,16 @@
 
   private ScmContainerLocationResponse submitRpcRequest(
       ScmContainerLocationRequest wrapper) throws ServiceException {
-    return rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+    if (!ADMIN_COMMAND_TYPE.contains(wrapper.getCmdType())) {
+      return rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+    }
+
+    // TODO: Modify ScmContainerLocationResponse to hold results from multi SCM
+    ScmContainerLocationResponse response = null;
+    for (StorageContainerLocationProtocolPB proxy : fpp.getProxies()) {
+      response = proxy.submitRequest(NULL_RPC_CONTROLLER, wrapper);
+    }
+    return response;
   }
 
   /**
@@ -558,7 +574,9 @@
         .getGetScmInfoResponse();
     ScmInfo.Builder builder = new ScmInfo.Builder()
         .setClusterId(resp.getClusterId())
-        .setScmId(resp.getScmId());
+        .setScmId(resp.getScmId())
+        .setRatisPeerRoles(resp.getPeerRolesList());
+
     return builder.build();
 
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
new file mode 100644
index 0000000..d982cf5
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_SERVICE_ID;
+
+/**
+ * Failover proxy provider for SCM block location.
+ */
+public class SCMBlockLocationFailoverProxyProvider implements
+    FailoverProxyProvider<ScmBlockLocationProtocolPB>, Closeable {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SCMBlockLocationFailoverProxyProvider.class);
+
+  private Map<String, ProxyInfo<ScmBlockLocationProtocolPB>> scmProxies;
+  private Map<String, SCMProxyInfo> scmProxyInfoMap;
+  private List<String> scmNodeIds;
+
+  private String currentProxySCMNodeId;
+  private int currentProxyIndex;
+
+  private final ConfigurationSource conf;
+  private final long scmVersion;
+
+  private String scmServiceId;
+
+  private String lastAttemptedLeader;
+
+  private final int maxRetryCount;
+  private final long retryInterval;
+
+
+  public SCMBlockLocationFailoverProxyProvider(ConfigurationSource conf) {
+    this.conf = conf;
+    this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
+
+    // Set some constant for non-HA.
+    if (scmServiceId == null) {
+      scmServiceId = SCM_DUMMY_SERVICE_ID;
+    }
+    this.scmProxies = new HashMap<>();
+    this.scmProxyInfoMap = new HashMap<>();
+
+    loadConfigs();
+
+    this.currentProxyIndex = 0;
+    currentProxySCMNodeId = scmNodeIds.get(currentProxyIndex);
+
+    SCMClientConfig config = conf.getObject(SCMClientConfig.class);
+    this.maxRetryCount = config.getRetryCount();
+    this.retryInterval = config.getRetryInterval();
+  }
+
+  private void loadConfigs() {
+
+    scmNodeIds = new ArrayList<>();
+    List<SCMNodeInfo> scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf);
+
+    for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) {
+      if (scmNodeInfo.getBlockClientAddress() == null) {
+        throw new ConfigurationException("SCM BlockClient Address could not " +
+            "be obtained from config. Config is not properly defined");
+      } else {
+        InetSocketAddress scmBlockClientAddress =
+            NetUtils.createSocketAddr(scmNodeInfo.getBlockClientAddress());
+
+        scmServiceId = scmNodeInfo.getServiceId();
+        String scmNodeId = scmNodeInfo.getNodeId();
+        scmNodeIds.add(scmNodeId);
+        SCMProxyInfo scmProxyInfo = new SCMProxyInfo(
+            scmNodeInfo.getServiceId(), scmNodeInfo.getNodeId(),
+            scmBlockClientAddress);
+        ProxyInfo<ScmBlockLocationProtocolPB> proxy
+            = new ProxyInfo<>(null, scmProxyInfo.toString());
+        scmProxies.put(scmNodeId, proxy);
+        scmProxyInfoMap.put(scmNodeId, scmProxyInfo);
+      }
+    }
+  }
+
+  @VisibleForTesting
+  public synchronized String getCurrentProxyOMNodeId() {
+    return currentProxySCMNodeId;
+  }
+
+  @Override
+  public synchronized ProxyInfo getProxy() {
+    ProxyInfo currentProxyInfo = scmProxies.get(currentProxySCMNodeId);
+    createSCMProxyIfNeeded(currentProxyInfo, currentProxySCMNodeId);
+    return currentProxyInfo;
+  }
+
+  @Override
+  public void performFailover(ScmBlockLocationProtocolPB newLeader) {
+    // Should do nothing here.
+    LOG.debug("Failing over to next proxy. {}", getCurrentProxyOMNodeId());
+  }
+
+  public void performFailoverToAssignedLeader(String newLeader) {
+    if (newLeader == null) {
+      // If newLeader is not assigned, it will fail over to next proxy.
+      nextProxyIndex();
+    } else {
+      if (!assignLeaderToNode(newLeader)) {
+        LOG.debug("Failing over SCM proxy to nodeId: {}", newLeader);
+        nextProxyIndex();
+      }
+    }
+  }
+
+  @Override
+  public Class<ScmBlockLocationProtocolPB> getInterface() {
+    return ScmBlockLocationProtocolPB.class;
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    for (ProxyInfo<ScmBlockLocationProtocolPB> proxy : scmProxies.values()) {
+      ScmBlockLocationProtocolPB scmProxy = proxy.proxy;
+      if (scmProxy != null) {
+        RPC.stopProxy(scmProxy);
+      }
+    }
+  }
+
+  public RetryAction getRetryAction(int failovers) {
+    if (failovers < maxRetryCount) {
+      return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+          getRetryInterval());
+    } else {
+      return RetryAction.FAIL;
+    }
+  }
+
+  private synchronized long getRetryInterval() {
+    // TODO add exponential backup
+    return retryInterval;
+  }
+
+  private synchronized int nextProxyIndex() {
+    lastAttemptedLeader = currentProxySCMNodeId;
+
+    // round robin the next proxy
+    currentProxyIndex = (currentProxyIndex + 1) % scmProxies.size();
+    currentProxySCMNodeId =  scmNodeIds.get(currentProxyIndex);
+    return currentProxyIndex;
+  }
+
+  private synchronized boolean assignLeaderToNode(String newLeaderNodeId) {
+    if (!currentProxySCMNodeId.equals(newLeaderNodeId)) {
+      if (scmProxies.containsKey(newLeaderNodeId)) {
+        lastAttemptedLeader = currentProxySCMNodeId;
+        currentProxySCMNodeId = newLeaderNodeId;
+        currentProxyIndex = scmNodeIds.indexOf(currentProxySCMNodeId);
+        return true;
+      }
+    } else {
+      lastAttemptedLeader = currentProxySCMNodeId;
+    }
+    return false;
+  }
+
+  /**
+   * Creates proxy object if it does not already exist.
+   */
+  private void createSCMProxyIfNeeded(ProxyInfo proxyInfo,
+                                     String nodeId) {
+    if (proxyInfo.proxy == null) {
+      InetSocketAddress address = scmProxyInfoMap.get(nodeId).getAddress();
+      try {
+        ScmBlockLocationProtocolPB proxy = createSCMProxy(address);
+        try {
+          proxyInfo.proxy = proxy;
+        } catch (IllegalAccessError iae) {
+          scmProxies.put(nodeId,
+              new ProxyInfo<>(proxy, proxyInfo.proxyInfo));
+        }
+      } catch (IOException ioe) {
+        LOG.error("{} Failed to create RPC proxy to SCM at {}",
+            this.getClass().getSimpleName(), address, ioe);
+        throw new RuntimeException(ioe);
+      }
+    }
+  }
+
+  private ScmBlockLocationProtocolPB createSCMProxy(
+      InetSocketAddress scmAddress) throws IOException {
+    Configuration hadoopConf =
+        LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
+    RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    return RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
+        scmAddress, UserGroupInformation.getCurrentUser(), hadoopConf,
+        NetUtils.getDefaultSocketFactory(hadoopConf),
+        (int)conf.getObject(SCMClientConfig.class).getRpcTimeOut());
+  }
+
+  public RetryPolicy getSCMBlockLocationRetryPolicy(String newLeader) {
+    RetryPolicy retryPolicy = new RetryPolicy() {
+      @Override
+      public RetryAction shouldRetry(Exception e, int retry,
+                                     int failover, boolean b) {
+        performFailoverToAssignedLeader(newLeader);
+        return getRetryAction(failover);
+      }
+    };
+    return retryPolicy;
+  }
+}
+
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java
new file mode 100644
index 0000000..99dc446
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigType;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.SCM;
+
+/**
+ * Config for SCM Block Client.
+ */
+@ConfigGroup(prefix = "hdds.scmclient")
+public class SCMClientConfig {
+  public static final String SCM_CLIENT_RPC_TIME_OUT = "rpc.timeout";
+  public static final String SCM_CLIENT_FAILOVER_MAX_RETRY =
+      "failover.max.retry";
+  public static final String SCM_CLIENT_RETRY_INTERVAL =
+      "failover.retry.interval";
+
+  @Config(key = SCM_CLIENT_RPC_TIME_OUT,
+      defaultValue = "15m",
+      type = ConfigType.TIME,
+      tags = {OZONE, SCM, CLIENT},
+      timeUnit = TimeUnit.MILLISECONDS,
+      description = "RpcClient timeout on waiting for the response from " +
+          "SCM. The default value is set to 15 minutes. " +
+          "If ipc.client.ping is set to true and this rpc-timeout " +
+          "is greater than the value of ipc.ping.interval, the effective " +
+          "value of the rpc-timeout is rounded up to multiple of " +
+          "ipc.ping.interval."
+  )
+  private long rpcTimeOut = 15 * 60 * 1000;
+
+  @Config(key = SCM_CLIENT_FAILOVER_MAX_RETRY,
+      defaultValue = "15",
+      type = ConfigType.INT,
+      tags = {OZONE, SCM, CLIENT},
+      description = "Max retry count for SCM Client when failover happens."
+  )
+  private int retryCount = 15;
+
+  @Config(key = SCM_CLIENT_RETRY_INTERVAL,
+      defaultValue = "2s",
+      type = ConfigType.TIME,
+      tags = {OZONE, SCM, CLIENT},
+      timeUnit = TimeUnit.MILLISECONDS,
+      description = "SCM Client timeout on waiting for the next connection " +
+          "retry to other SCM IP. The default value is set to 2 minutes. "
+  )
+  private long retryInterval = 2 * 1000;
+
+  public long getRpcTimeOut() {
+    return rpcTimeOut;
+  }
+
+  public void setRpcTimeOut(long timeOut) {
+    // As at the end this value should not exceed MAX_VALUE, as underlying
+    // Rpc layer SocketTimeout parameter is int.
+    if (rpcTimeOut > Integer.MAX_VALUE) {
+      this.rpcTimeOut = Integer.MAX_VALUE;
+    }
+    this.rpcTimeOut = timeOut;
+  }
+
+  public int getRetryCount() {
+    return retryCount;
+  }
+
+  public void setRetryCount(int retryCount) {
+    this.retryCount = retryCount;
+  }
+
+  public long getRetryInterval() {
+    return retryInterval;
+  }
+
+  public void setRetryInterval(long retryInterval) {
+    this.retryInterval = retryInterval;
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java
new file mode 100644
index 0000000..ff13122
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Failover proxy provider for StorageContainerLocationProtocolPB.
+ */
+public class SCMContainerLocationFailoverProxyProvider implements
+    FailoverProxyProvider<StorageContainerLocationProtocolPB>, Closeable {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SCMContainerLocationFailoverProxyProvider.class);
+
+  // scmNodeId -> ProxyInfo<rpcProxy>
+  private final Map<String,
+      ProxyInfo<StorageContainerLocationProtocolPB>> scmProxies;
+  // scmNodeId -> SCMProxyInfo
+  private final Map<String, SCMProxyInfo> scmProxyInfoMap;
+  private List<String> scmNodeIds;
+
+  private String currentProxySCMNodeId;
+  private int currentProxyIndex;
+
+  private final ConfigurationSource conf;
+  private final SCMClientConfig scmClientConfig;
+  private final long scmVersion;
+
+  private String scmServiceId;
+
+  private final int maxRetryCount;
+  private final long retryInterval;
+
+
+  public SCMContainerLocationFailoverProxyProvider(ConfigurationSource conf) {
+    this.conf = conf;
+    this.scmVersion = RPC.getProtocolVersion(
+        StorageContainerLocationProtocolPB.class);
+
+    this.scmProxies = new HashMap<>();
+    this.scmProxyInfoMap = new HashMap<>();
+    loadConfigs();
+
+    this.currentProxyIndex = 0;
+    currentProxySCMNodeId = scmNodeIds.get(currentProxyIndex);
+    scmClientConfig = conf.getObject(SCMClientConfig.class);
+    this.maxRetryCount = scmClientConfig.getRetryCount();
+    this.retryInterval = scmClientConfig.getRetryInterval();
+  }
+
+  @VisibleForTesting
+  protected void loadConfigs() {
+    List<SCMNodeInfo> scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf);
+
+    scmNodeIds = new ArrayList<>();
+
+    for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) {
+      if (scmNodeInfo.getScmClientAddress() == null) {
+        throw new ConfigurationException("SCM Client Address could not " +
+            "be obtained from config. Config is not properly defined");
+      } else {
+        InetSocketAddress scmClientAddress =
+            NetUtils.createSocketAddr(scmNodeInfo.getScmClientAddress());
+
+        scmServiceId = scmNodeInfo.getServiceId();
+        String scmNodeId = scmNodeInfo.getNodeId();
+
+        scmNodeIds.add(scmNodeId);
+        SCMProxyInfo scmProxyInfo = new SCMProxyInfo(scmServiceId, scmNodeId,
+            scmClientAddress);
+        ProxyInfo< StorageContainerLocationProtocolPB > proxy
+            = new ProxyInfo<>(null, scmProxyInfo.toString());
+        scmProxies.put(scmNodeId, proxy);
+        scmProxyInfoMap.put(scmNodeId, scmProxyInfo);
+      }
+    }
+  }
+
+  @VisibleForTesting
+  public synchronized String getCurrentProxySCMNodeId() {
+    return currentProxySCMNodeId;
+  }
+
+  @Override
+  public synchronized ProxyInfo<StorageContainerLocationProtocolPB> getProxy() {
+    ProxyInfo<StorageContainerLocationProtocolPB> currentProxyInfo
+        = scmProxies.get(currentProxySCMNodeId);
+    createSCMProxyIfNeeded(currentProxyInfo, currentProxySCMNodeId);
+    return currentProxyInfo;
+  }
+
+  public synchronized List<StorageContainerLocationProtocolPB> getProxies() {
+    scmProxies.forEach(
+        (nodeId, proxyInfo) -> createSCMProxyIfNeeded(proxyInfo, nodeId));
+
+    return scmProxies.values().stream()
+        .map(proxyInfo -> proxyInfo.proxy).collect(Collectors.toList());
+  }
+
+  @Override
+  public synchronized void performFailover(
+      StorageContainerLocationProtocolPB newLeader) {
+    // Should do nothing here.
+    LOG.debug("Failing over to next proxy. {}", getCurrentProxySCMNodeId());
+  }
+
+  public synchronized void performFailoverToAssignedLeader(String newLeader) {
+    if (newLeader == null) {
+      // If newLeader is not assigned, fail over to next proxy.
+      nextProxyIndex();
+    } else if (!assignLeaderToNode(newLeader)) {
+      // If failed to fail over to newLeader, fail over to next proxy.
+      nextProxyIndex();
+    }
+  }
+
+  @Override
+  public Class<StorageContainerLocationProtocolPB> getInterface() {
+    return StorageContainerLocationProtocolPB.class;
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    for (ProxyInfo<StorageContainerLocationProtocolPB>
+        proxy : scmProxies.values()) {
+      StorageContainerLocationProtocolPB scmProxy =
+          proxy.proxy;
+      if (scmProxy != null) {
+        RPC.stopProxy(scmProxy);
+      }
+    }
+  }
+
+  public synchronized RetryPolicy.RetryAction getRetryAction(int failovers) {
+    if (failovers >= maxRetryCount) {
+      return RetryPolicy.RetryAction.FAIL;
+    }
+
+    return new RetryPolicy.RetryAction(
+        RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+        getRetryInterval());
+  }
+
+  private long getRetryInterval() {
+    // TODO add exponential backup
+    return retryInterval;
+  }
+
+  private int nextProxyIndex() {
+    // round robin the next proxy
+    currentProxyIndex = (currentProxyIndex + 1) % scmProxies.size();
+    currentProxySCMNodeId =  scmNodeIds.get(currentProxyIndex);
+    return currentProxyIndex;
+  }
+
+  private boolean assignLeaderToNode(String newLeaderNodeId) {
+    if (!currentProxySCMNodeId.equals(newLeaderNodeId)
+        && scmProxies.containsKey(newLeaderNodeId)) {
+      currentProxySCMNodeId = newLeaderNodeId;
+      currentProxyIndex = scmNodeIds.indexOf(currentProxySCMNodeId);
+
+      LOG.debug("Failing over SCM proxy to nodeId: {}", newLeaderNodeId);
+      return true;
+    }
+
+    return false;
+  }
+
+  /**
+   * Creates proxy object if it does not already exist.
+   */
+  private void createSCMProxyIfNeeded(ProxyInfo proxyInfo,
+                                      String nodeId) {
+    if (proxyInfo.proxy == null) {
+      InetSocketAddress address = scmProxyInfoMap.get(nodeId).getAddress();
+      try {
+        StorageContainerLocationProtocolPB proxy =
+            createSCMProxy(address);
+        try {
+          proxyInfo.proxy = proxy;
+        } catch (IllegalAccessError iae) {
+          scmProxies.put(nodeId,
+              new ProxyInfo<>(proxy, proxyInfo.proxyInfo));
+        }
+      } catch (IOException ioe) {
+        LOG.error("{} Failed to create RPC proxy to SCM at {}",
+            this.getClass().getSimpleName(), address, ioe);
+        throw new RuntimeException(ioe);
+      }
+    }
+  }
+
+  private StorageContainerLocationProtocolPB createSCMProxy(
+      InetSocketAddress scmAddress) throws IOException {
+    Configuration hadoopConf =
+        LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
+    RPC.setProtocolEngine(hadoopConf, StorageContainerLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    return RPC.getProxy(
+        StorageContainerLocationProtocolPB.class,
+        scmVersion, scmAddress, UserGroupInformation.getCurrentUser(),
+        hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf),
+        (int)scmClientConfig.getRpcTimeOut());
+  }
+
+  public RetryPolicy getRetryPolicy() {
+    return new RetryPolicy() {
+      @Override
+      public RetryAction shouldRetry(Exception e, int retry,
+                                     int failover, boolean b) {
+        performFailoverToAssignedLeader(null);
+        return getRetryAction(failover);
+      }
+    };
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
new file mode 100644
index 0000000..761d57b
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+
+/**
+ * Class to store SCM proxy info.
+ */
+public class SCMProxyInfo {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMProxyInfo.class);
+
+  private final String serviceId;
+  private final String nodeId;
+  private final String rpcAddrStr;
+  private final InetSocketAddress rpcAddr;
+
+  public SCMProxyInfo(String serviceID, String nodeID,
+                      InetSocketAddress rpcAddress) {
+    Preconditions.checkNotNull(rpcAddress);
+    this.serviceId = serviceID;
+    this.nodeId = nodeID;
+    this.rpcAddrStr = rpcAddress.toString();
+    this.rpcAddr = rpcAddress;
+    if (rpcAddr.isUnresolved()) {
+      LOG.warn("SCM address {} for serviceID {} remains unresolved " +
+              "for node ID {} Check your ozone-site.xml file to ensure scm " +
+              "addresses are configured properly.",
+          rpcAddress, serviceId, nodeId);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "nodeId=" + nodeId + ",nodeAddress=" + rpcAddrStr;
+  }
+
+  public InetSocketAddress getAddress() {
+    return rpcAddr;
+  }
+
+  public String getServiceId() {
+    return serviceId;
+  }
+
+  public String getNodeId() {
+    return nodeId;
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMSecurityProtocolFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMSecurityProtocolFailoverProxyProvider.java
new file mode 100644
index 0000000..a2d2fb3
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMSecurityProtocolFailoverProxyProvider.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
+
+/**
+ * Failover proxy provider for SCMSecurityProtocol server.
+ */
+public class SCMSecurityProtocolFailoverProxyProvider implements
+    FailoverProxyProvider<SCMSecurityProtocolPB>, Closeable {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SCMSecurityProtocolFailoverProxyProvider.class);
+
+  // scmNodeId -> ProxyInfo<rpcProxy>
+  private final Map<String,
+      ProxyInfo<SCMSecurityProtocolPB>> scmProxies;
+
+  // scmNodeId -> SCMProxyInfo
+  private final Map<String, SCMProxyInfo> scmProxyInfoMap;
+
+  private List<String> scmNodeIds;
+
+  private String currentProxySCMNodeId;
+  private int currentProxyIndex;
+
+  private final ConfigurationSource conf;
+  private final SCMClientConfig scmClientConfig;
+  private final long scmVersion;
+
+  private String scmServiceId;
+
+  private final int maxRetryCount;
+  private final long retryInterval;
+
+  private final UserGroupInformation ugi;
+
+  /**
+   * Construct fail-over proxy provider for SCMSecurityProtocol Server.
+   * @param conf
+   * @param userGroupInformation
+   */
+  public SCMSecurityProtocolFailoverProxyProvider(ConfigurationSource conf,
+      UserGroupInformation userGroupInformation) {
+    Preconditions.checkNotNull(userGroupInformation);
+    this.ugi = userGroupInformation;
+    this.conf = conf;
+    this.scmVersion = RPC.getProtocolVersion(SCMSecurityProtocolPB.class);
+
+    this.scmProxies = new HashMap<>();
+    this.scmProxyInfoMap = new HashMap<>();
+    loadConfigs();
+
+    this.currentProxyIndex = 0;
+    currentProxySCMNodeId = scmNodeIds.get(currentProxyIndex);
+    scmClientConfig = conf.getObject(SCMClientConfig.class);
+    this.maxRetryCount = scmClientConfig.getRetryCount();
+    this.retryInterval = scmClientConfig.getRetryInterval();
+  }
+
+  protected void loadConfigs() {
+    List<SCMNodeInfo> scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf);
+    scmNodeIds = new ArrayList<>();
+
+    for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) {
+      if (scmNodeInfo.getScmSecurityAddress() == null) {
+        throw new ConfigurationException("SCM Client Address could not " +
+            "be obtained from config. Config is not properly defined");
+      } else {
+        InetSocketAddress scmSecurityAddress =
+            NetUtils.createSocketAddr(scmNodeInfo.getScmSecurityAddress());
+
+        scmServiceId = scmNodeInfo.getServiceId();
+        String scmNodeId = scmNodeInfo.getNodeId();
+
+        scmNodeIds.add(scmNodeId);
+        SCMProxyInfo scmProxyInfo = new SCMProxyInfo(scmServiceId, scmNodeId,
+            scmSecurityAddress);
+        scmProxyInfoMap.put(scmNodeId, scmProxyInfo);
+      }
+    }
+  }
+
+  @Override
+  public synchronized ProxyInfo<SCMSecurityProtocolPB> getProxy() {
+    ProxyInfo currentProxyInfo = scmProxies.get(getCurrentProxySCMNodeId());
+    if (currentProxyInfo == null) {
+      currentProxyInfo = createSCMProxy(getCurrentProxySCMNodeId());
+    }
+    return currentProxyInfo;
+  }
+
+  /**
+   * Creates proxy object.
+   */
+  private ProxyInfo createSCMProxy(String nodeId) {
+    ProxyInfo proxyInfo;
+    SCMProxyInfo scmProxyInfo = scmProxyInfoMap.get(nodeId);
+    InetSocketAddress address = scmProxyInfo.getAddress();
+    try {
+      SCMSecurityProtocolPB scmProxy = createSCMProxy(address);
+      // Create proxyInfo here, to make it work with all Hadoop versions.
+      proxyInfo = new ProxyInfo<>(scmProxy, scmProxyInfo.toString());
+      scmProxies.put(nodeId, proxyInfo);
+      return proxyInfo;
+    } catch (IOException ioe) {
+      LOG.error("{} Failed to create RPC proxy to SCM at {}",
+          this.getClass().getSimpleName(), address, ioe);
+      throw new RuntimeException(ioe);
+    }
+  }
+
+  private SCMSecurityProtocolPB createSCMProxy(InetSocketAddress scmAddress)
+      throws IOException {
+    Configuration hadoopConf =
+        LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
+    RPC.setProtocolEngine(hadoopConf, SCMSecurityProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    // FailoverOnNetworkException ensures that the IPC layer does not attempt
+    // retries on the same SCM in case of connection exception. This retry
+    // policy essentially results in TRY_ONCE_THEN_FAIL.
+
+    RetryPolicy connectionRetryPolicy = RetryPolicies
+        .failoverOnNetworkException(0);
+
+    return RPC.getProtocolProxy(SCMSecurityProtocolPB.class,
+        scmVersion, scmAddress, ugi,
+        hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf),
+        (int)scmClientConfig.getRpcTimeOut(), connectionRetryPolicy).getProxy();
+  }
+
+
+  @Override
+  public void performFailover(SCMSecurityProtocolPB currentProxy) {
+    if (LOG.isDebugEnabled()) {
+      int currentIndex = getCurrentProxyIndex();
+      LOG.debug("Failing over SCM Security proxy to index: {}, nodeId: {}",
+          currentIndex, scmNodeIds.get(currentIndex));
+    }
+  }
+
+  /**
+   * Performs fail-over to the next proxy.
+   */
+  public void performFailoverToNextProxy() {
+    int newProxyIndex = incrementProxyIndex();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Incrementing SCM Security proxy index to {}, nodeId: {}",
+          newProxyIndex, scmNodeIds.get(newProxyIndex));
+    }
+  }
+
+  /**
+   * Update the proxy index to the next proxy in the list.
+   * @return the new proxy index
+   */
+  private synchronized int incrementProxyIndex() {
+    currentProxyIndex = (currentProxyIndex + 1) % scmProxies.size();
+    currentProxySCMNodeId = scmNodeIds.get(currentProxyIndex);
+    return currentProxyIndex;
+  }
+
+  public RetryPolicy getRetryPolicy() {
+    // Client will attempt up to maxFailovers number of failovers between
+    // available SCMs before throwing exception.
+    RetryPolicy retryPolicy = new RetryPolicy() {
+      @Override
+      public RetryAction shouldRetry(Exception exception, int retries,
+          int failovers, boolean isIdempotentOrAtMostOnce)
+          throws Exception {
+
+        if (LOG.isDebugEnabled()) {
+          if (exception.getCause() != null) {
+            LOG.debug("RetryProxy: SCM Security Server {}: {}: {}",
+                getCurrentProxySCMNodeId(),
+                exception.getCause().getClass().getSimpleName(),
+                exception.getCause().getMessage());
+          } else {
+            LOG.debug("RetryProxy: SCM {}: {}", getCurrentProxySCMNodeId(),
+                exception.getMessage());
+          }
+        }
+
+        // For AccessControl Exception where Client is not authentica
+        if (HAUtils.isAccessControlException(exception)) {
+          return RetryAction.FAIL;
+        }
+
+        // Perform fail over to next proxy, as right now we don't have any
+        // suggested leader ID from server, we fail over to next one.
+        // TODO: Act based on server response if leader id is passed.
+        performFailoverToNextProxy();
+        return getRetryAction(FAILOVER_AND_RETRY, failovers);
+      }
+
+      private RetryAction getRetryAction(RetryDecision fallbackAction,
+          int failovers) {
+        if (failovers < maxRetryCount) {
+          return new RetryAction(fallbackAction, getRetryInterval());
+        } else {
+          return RetryAction.FAIL;
+        }
+      }
+    };
+
+    return retryPolicy;
+  }
+
+
+  @Override
+  public Class< SCMSecurityProtocolPB > getInterface() {
+    return SCMSecurityProtocolPB.class;
+  }
+
+  @Override
+  public void close() throws IOException {
+    for (Map.Entry<String, ProxyInfo<SCMSecurityProtocolPB>> proxy :
+        scmProxies.entrySet()) {
+      if (proxy.getValue() != null) {
+        RPC.stopProxy(proxy.getValue());
+      }
+      scmProxies.remove(proxy.getKey());
+    }
+  }
+
+  public synchronized String getCurrentProxySCMNodeId() {
+    return currentProxySCMNodeId;
+  }
+
+  public synchronized int getCurrentProxyIndex() {
+    return currentProxyIndex;
+  }
+
+  private long getRetryInterval() {
+    return retryInterval;
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java
similarity index 87%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java
index 4944017..e3bb058 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java
@@ -15,8 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
+package org.apache.hadoop.hdds.scm.proxy;
 
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ * This package contains classes related to scm proxy.
  */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
index f0443d8..4fa60d7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
@@ -19,7 +19,8 @@
 
 package org.apache.hadoop.hdds.security.x509.certificate.authority;
 
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType;
@@ -80,13 +81,14 @@
    *
    * @param csr  - Certificate Signing Request.
    * @param type - An Enum which says what kind of approval process to follow.
+   * @param role : OM/SCM/DN
    * @return A future that will have this certificate when this request is
    * approved.
    * @throws SCMSecurityException - on Error.
    */
   Future<X509CertificateHolder> requestCertificate(
       PKCS10CertificationRequest csr,
-      CertificateApprover.ApprovalType type)
+      CertificateApprover.ApprovalType type, NodeType role)
       throws SCMSecurityException;
 
 
@@ -95,12 +97,13 @@
    *
    * @param csr - Certificate Signing Request as a PEM encoded String.
    * @param type - An Enum which says what kind of approval process to follow.
+   * @param nodeType: OM/SCM/DN
    * @return A future that will have this certificate when this request is
    * approved.
    * @throws SCMSecurityException - on Error.
    */
   Future<X509CertificateHolder> requestCertificate(String csr,
-      ApprovalType type) throws IOException;
+      ApprovalType type, NodeType nodeType) throws IOException;
 
   /**
    * Revokes a Certificate issued by this CertificateServer.
@@ -119,16 +122,23 @@
 
   /**
    * List certificates.
-   * @param type            - node type: OM/SCM/DN
+   * @param role            - role: OM/SCM/DN
    * @param startSerialId   - start certificate serial id
    * @param count           - max number of certificates returned in a batch
    * @return List of X509 Certificates.
    * @throws IOException - On Failure
    */
-  List<X509Certificate> listCertificate(HddsProtos.NodeType type,
+  List<X509Certificate> listCertificate(NodeType role,
       long startSerialId, int count, boolean isRevoked) throws IOException;
 
   /**
+   * Reinitialise the certificate server withe the SCMMetastore during SCM
+   * state reload post install db checkpoint.
+   * @param scmMetadataStore
+   */
+  void reinitialize(SCMMetadataStore scmMetadataStore);
+
+  /**
    * Make it explicit what type of CertificateServer we are creating here.
    */
   enum CAType {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
index 8a265c6..0a669b9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
@@ -19,9 +19,11 @@
 
 package org.apache.hadoop.hdds.security.x509.certificate.authority;
 
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
 import org.bouncycastle.asn1.x509.CRLReason;
 import org.bouncycastle.cert.X509CertificateHolder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 
 import java.io.IOException;
 import java.math.BigInteger;
@@ -45,10 +47,20 @@
    * Writes a new certificate that was issued to the persistent store.
    * @param serialID - Certificate Serial Number.
    * @param certificate - Certificate to persist.
+   * @param role - OM/DN/SCM.
    * @throws IOException - on Failure.
    */
+  @Replicate
   void storeValidCertificate(BigInteger serialID,
-                             X509Certificate certificate) throws IOException;
+      X509Certificate certificate, NodeType role) throws IOException;
+
+  /**
+   * Check certificate serialID exists or not. If exists throws an exception.
+   * @param serialID
+   * @throws IOException
+   */
+  void checkValidCertID(BigInteger serialID) throws IOException;
+
 
   /**
    * Adds the certificates to be revoked to a new CRL and moves all the
@@ -99,16 +111,21 @@
    * @return list of X509 certificates.
    * @throws IOException - on failure.
    */
-  List<X509Certificate> listCertificate(HddsProtos.NodeType role,
+  List<X509Certificate> listCertificate(NodeType role,
       BigInteger startSerialID, int count, CertType certType)
       throws IOException;
 
   /**
+   * Reinitialize the certificate server.
+   * @param metadataStore SCMMetaStore.
+   */
+  void reinitialize(SCMMetadataStore metadataStore);
+
+  /**
    * Different kind of Certificate stores.
    */
   enum CertType {
     VALID_CERTS,
     REVOKED_CERTS
   }
-
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
index 7ea28b6..39f610c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java
@@ -89,7 +89,8 @@
       Date validTill,
       PKCS10CertificationRequest certificationRequest,
       String scmId,
-      String clusterId) throws IOException, OperatorCreationException {
+      String clusterId) throws IOException,
+      OperatorCreationException {
 
     AlgorithmIdentifier sigAlgId = new
         DefaultSignatureAlgorithmIdentifierFinder().find(
@@ -135,7 +136,7 @@
         new X509v3CertificateBuilder(
             caCertificate.getSubject(),
             // Serial is not sequential but it is monotonically increasing.
-            BigInteger.valueOf(Time.monotonicNowNanos()),
+            BigInteger.valueOf(generateSerialId()),
             validFrom,
             validTill,
             x500Name, keyInfo);
@@ -155,6 +156,12 @@
 
   }
 
+  public long generateSerialId() {
+    // TODO: to make generation of serialId distributed.
+    // This issue will be fixed in HDDS-4999.
+    return Time.monotonicNowNanos();
+  }
+
   @Override
   public CompletableFuture<X509CertificateHolder> inspectCSR(String csr)
       throws IOException {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index 3b1a47e..7622dda 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -23,10 +23,10 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.validator.routines.DomainValidator;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate;
@@ -59,9 +59,12 @@
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Future;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Consumer;
 
 import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getCertificationRequest;
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.UNABLE_TO_ISSUE_CERTIFICATE;
 import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.CSR_ERROR;
 
 /**
@@ -116,7 +119,7 @@
   private final String subject;
   private final String clusterID;
   private final String scmID;
-  private String componentName = Paths.get("scm", "ca").toString();
+  private String componentName;
   private Path caKeysPath;
   private Path caRootX509Path;
   private SecurityConfig config;
@@ -127,6 +130,7 @@
   private CertificateApprover approver;
   private CRLApprover crlApprover;
   private CertificateStore store;
+  private Lock lock;
 
   /**
    * Create an Instance of DefaultCAServer.
@@ -136,11 +140,15 @@
    * @param certificateStore - A store used to persist Certificates.
    */
   public DefaultCAServer(String subject, String clusterID, String scmID,
-                         CertificateStore certificateStore) {
+                         CertificateStore certificateStore,
+      PKIProfile pkiProfile, String componentName) {
     this.subject = subject;
     this.clusterID = clusterID;
     this.scmID = scmID;
     this.store = certificateStore;
+    this.profile = pkiProfile;
+    this.componentName = componentName;
+    lock = new ReentrantLock();
   }
 
   @Override
@@ -149,28 +157,18 @@
     caKeysPath = securityConfig.getKeyLocation(componentName);
     caRootX509Path = securityConfig.getCertificateLocation(componentName);
     this.config = securityConfig;
-
-    // TODO: Make these configurable and load different profiles based on
-    // config.
-    profile = new DefaultProfile();
     this.approver = new DefaultApprover(profile, this.config);
 
     /* In future we will split this code to have different kind of CAs.
      * Right now, we have only self-signed CertificateServer.
      */
 
-    if (type == CAType.SELF_SIGNED_CA) {
-      VerificationStatus status = verifySelfSignedCA(securityConfig);
-      Consumer<SecurityConfig> caInitializer =
-          processVerificationStatus(status);
-      caInitializer.accept(securityConfig);
-      crlApprover = new DefaultCRLApprover(securityConfig,
-          getCAKeys().getPrivate());
-      return;
-    }
-
-    LOG.error("We support only Self-Signed CAs for now.");
-    throw new IllegalStateException("Not implemented functionality requested.");
+    VerificationStatus status = verifySelfSignedCA(securityConfig);
+    Consumer<SecurityConfig> caInitializer =
+        processVerificationStatus(status, type);
+    caInitializer.accept(securityConfig);
+    crlApprover = new DefaultCRLApprover(securityConfig,
+        getCAKeys().getPrivate());
   }
 
   @Override
@@ -213,7 +211,7 @@
   @Override
   public Future<X509CertificateHolder> requestCertificate(
       PKCS10CertificationRequest csr,
-      CertificateApprover.ApprovalType approverType) {
+      CertificateApprover.ApprovalType approverType, NodeType role) {
     LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate();
     LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT);
     LocalDate endDate =
@@ -238,12 +236,12 @@
       case TESTING_AUTOMATIC:
         X509CertificateHolder xcert;
         try {
-          xcert = signAndStoreCertificate(beginDate, endDate, csr);
+          xcert = signAndStoreCertificate(beginDate, endDate, csr, role);
         } catch (SCMSecurityException e) {
           // Certificate with conflicting serial id, retry again may resolve
           // this issue.
           LOG.error("Certificate storage failed, retrying one more time.", e);
-          xcert = signAndStoreCertificate(beginDate, endDate, csr);
+          xcert = signAndStoreCertificate(beginDate, endDate, csr, role);
         }
 
         xcertHolder.complete(xcert);
@@ -253,29 +251,42 @@
       }
     } catch (CertificateException | IOException | OperatorCreationException e) {
       LOG.error("Unable to issue a certificate.", e);
-      xcertHolder.completeExceptionally(new SCMSecurityException(e));
+      xcertHolder.completeExceptionally(
+          new SCMSecurityException(e, UNABLE_TO_ISSUE_CERTIFICATE));
     }
     return xcertHolder;
   }
 
   private X509CertificateHolder signAndStoreCertificate(LocalDate beginDate,
-      LocalDate endDate, PKCS10CertificationRequest csr) throws IOException,
+      LocalDate endDate, PKCS10CertificationRequest csr, NodeType role)
+      throws IOException,
       OperatorCreationException, CertificateException {
-    X509CertificateHolder xcert = approver.sign(config,
-        getCAKeys().getPrivate(),
-        getCACertificate(), java.sql.Date.valueOf(beginDate),
-        java.sql.Date.valueOf(endDate), csr, scmID, clusterID);
-    store.storeValidCertificate(xcert.getSerialNumber(),
-        CertificateCodec.getX509Certificate(xcert));
+
+    lock.lock();
+    X509CertificateHolder xcert;
+    try {
+      xcert = approver.sign(config,
+          getCAKeys().getPrivate(),
+          getCACertificate(), java.sql.Date.valueOf(beginDate),
+          java.sql.Date.valueOf(endDate), csr, scmID, clusterID);
+      if (store != null) {
+        store.checkValidCertID(xcert.getSerialNumber());
+        store.storeValidCertificate(xcert.getSerialNumber(),
+            CertificateCodec.getX509Certificate(xcert), role);
+      }
+    } finally {
+      lock.unlock();
+    }
     return xcert;
   }
 
   @Override
   public Future<X509CertificateHolder> requestCertificate(String csr,
-      CertificateApprover.ApprovalType type) throws IOException {
+      CertificateApprover.ApprovalType type, NodeType nodeType)
+      throws IOException {
     PKCS10CertificationRequest request =
         getCertificationRequest(csr);
-    return requestCertificate(request, type);
+    return requestCertificate(request, type, nodeType);
   }
 
   @Override
@@ -302,23 +313,19 @@
     return revoked;
   }
 
-  /**
-   *
-   * @param role            - node type: OM/SCM/DN.
-   * @param startSerialId   - start cert serial id.
-   * @param count           - max number of certificates returned in a batch.
-   * @param isRevoked       - whether return revoked cert only.
-   * @return
-   * @throws IOException
-   */
   @Override
-  public List<X509Certificate> listCertificate(HddsProtos.NodeType role,
+  public List<X509Certificate> listCertificate(NodeType role,
       long startSerialId, int count, boolean isRevoked) throws IOException {
     return store.listCertificate(role, BigInteger.valueOf(startSerialId), count,
         isRevoked? CertificateStore.CertType.REVOKED_CERTS :
             CertificateStore.CertType.VALID_CERTS);
   }
 
+  @Override
+  public void reinitialize(SCMMetadataStore scmMetadataStore) {
+    store.reinitialize(scmMetadataStore);
+  }
+
   /**
    * Generates a Self Signed CertificateServer. These are the steps in
    * generating a Self-Signed CertificateServer.
@@ -413,7 +420,7 @@
    */
   @VisibleForTesting
   Consumer<SecurityConfig> processVerificationStatus(
-      VerificationStatus status) {
+      VerificationStatus status,  CAType type) {
     Consumer<SecurityConfig> consumer = null;
     switch (status) {
     case SUCCESS:
@@ -441,19 +448,31 @@
       };
       break;
     case INITIALIZE:
-      consumer = (arg) -> {
-        try {
-          generateSelfSignedCA(arg);
-        } catch (NoSuchProviderException | NoSuchAlgorithmException
-            | IOException e) {
-          LOG.error("Unable to initialize CertificateServer.", e);
-        }
-        VerificationStatus newStatus = verifySelfSignedCA(arg);
-        if (newStatus != VerificationStatus.SUCCESS) {
-          LOG.error("Unable to initialize CertificateServer, failed in " +
-              "verification.");
-        }
-      };
+      if (type == CAType.SELF_SIGNED_CA) {
+        consumer = (arg) -> {
+          try {
+            generateSelfSignedCA(arg);
+          } catch (NoSuchProviderException | NoSuchAlgorithmException
+              | IOException e) {
+            LOG.error("Unable to initialize CertificateServer.", e);
+          }
+          VerificationStatus newStatus = verifySelfSignedCA(arg);
+          if (newStatus != VerificationStatus.SUCCESS) {
+            LOG.error("Unable to initialize CertificateServer, failed in " +
+                "verification.");
+          }
+        };
+      } else if (type == CAType.INTERMEDIARY_CA) {
+        // For sub CA certificates are generated during bootstrap/init. If
+        // both keys/certs are missing, init/bootstrap is missed to be
+        // performed.
+        consumer = (arg) -> {
+          LOG.error("Sub SCM CA Server is missing keys/certs. SCM is started " +
+              "with out init/bootstrap");
+          throw new IllegalStateException("INTERMEDIARY_CA Should not be" +
+              " in Initialize State during startup.");
+        };
+      }
       break;
     default:
       /* Make CheckStyle happy */
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
index 53eb98f..a146c73 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java
@@ -19,8 +19,12 @@
 
 package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles;
 
+import org.bouncycastle.asn1.ASN1ObjectIdentifier;
+import org.bouncycastle.asn1.x509.BasicConstraints;
 import org.bouncycastle.asn1.x509.Extension;
+import org.bouncycastle.asn1.x509.KeyUsage;
 
+import java.util.Map;
 import java.util.function.BiFunction;
 
 import static java.lang.Boolean.TRUE;
@@ -32,7 +36,7 @@
  */
 public class DefaultCAProfile extends DefaultProfile {
   static final BiFunction<Extension, PKIProfile, Boolean>
-      VALIDATE_BASIC_CONSTRAINTS = (e, b) -> TRUE;
+      VALIDATE_BASIC_CONSTRAINTS = DefaultCAProfile::validateBasicExtensions;
   static final BiFunction<Extension, PKIProfile, Boolean>
       VALIDATE_CRL_NUMBER = (e, b) -> TRUE;
   static final BiFunction<Extension, PKIProfile, Boolean>
@@ -43,4 +47,38 @@
       VALIDATE_NAME_CONSTRAINTS = (e, b) -> TRUE;
   static final BiFunction<Extension, PKIProfile, Boolean>
       VALIDATE_CRL_DISTRIBUTION_POINTS = (e, b) -> TRUE;
-}
+
+
+  private static boolean validateBasicExtensions(Extension ext,
+      PKIProfile pkiProfile) {
+    BasicConstraints constraints =
+        BasicConstraints.getInstance(ext.getParsedValue());
+    if(constraints.isCA()) {
+      if (pkiProfile.isCA()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public boolean isCA() {
+    return true;
+  }
+
+  @Override
+  public Map<ASN1ObjectIdentifier,
+      BiFunction< Extension, PKIProfile, Boolean>> getExtensionsMap() {
+    // Add basic constraint.
+    EXTENSIONS_MAP.putIfAbsent(Extension.basicConstraints,
+        VALIDATE_BASIC_CONSTRAINTS);
+    return EXTENSIONS_MAP;
+  }
+
+  @Override
+  public KeyUsage getKeyUsage() {
+    return new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment
+        | KeyUsage.dataEncipherment | KeyUsage.keyAgreement | KeyUsage.cRLSign
+        | KeyUsage.keyCertSign);
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
index 18659dc..7791fa9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java
@@ -77,7 +77,7 @@
       GeneralName.otherName,
   };
   // Map that handles all the Extensions lookup and validations.
-  private static final Map<ASN1ObjectIdentifier, BiFunction<Extension,
+  protected static final Map<ASN1ObjectIdentifier, BiFunction<Extension,
       PKIProfile, Boolean>> EXTENSIONS_MAP = Stream.of(
       new SimpleEntry<>(Extension.keyUsage, VALIDATE_KEY_USAGE),
       new SimpleEntry<>(Extension.subjectAlternativeName, VALIDATE_SAN),
@@ -266,7 +266,7 @@
    */
   @Override
   public ASN1ObjectIdentifier[] getSupportedExtensions() {
-    return EXTENSIONS_MAP.keySet().toArray(new ASN1ObjectIdentifier[0]);
+    return getExtensionsMap().keySet().toArray(new ASN1ObjectIdentifier[0]);
   }
 
   /**
@@ -274,7 +274,7 @@
    */
   @Override
   public boolean isSupportedExtension(Extension extension) {
-    return EXTENSIONS_MAP.containsKey(extension.getExtnId());
+    return getExtensionsMap().containsKey(extension.getExtnId());
   }
 
   /**
@@ -337,4 +337,10 @@
   public boolean isCA() {
     return false;
   }
+
+  @Override
+  public Map<ASN1ObjectIdentifier, BiFunction< Extension, PKIProfile,
+      Boolean>> getExtensionsMap() {
+    return EXTENSIONS_MAP;
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java
index c3ff198..c4878dd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java
@@ -26,6 +26,8 @@
 import org.bouncycastle.asn1.x509.KeyUsage;
 
 import java.net.UnknownHostException;
+import java.util.Map;
+import java.util.function.BiFunction;
 
 /**
  * Base class for profile rules. Generally profiles are documents that define
@@ -137,4 +139,11 @@
    * @return  True, if the profile used is for CA, false otherwise.
    */
   boolean isCA();
+
+  /**
+   * Return all extensions supported by this profile.
+   * @return
+   */
+  Map<ASN1ObjectIdentifier,
+        BiFunction< Extension, PKIProfile, Boolean> > getExtensionsMap();
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
index c776398..0ec4d42 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
@@ -212,4 +212,10 @@
    */
   String getSecurityProvider();
 
+  /**
+   * Return component name of this certificate client.
+   * @return component name
+   */
+  String getComponentName();
+
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
index 40c5b0a..8c7c9f0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java
@@ -64,4 +64,9 @@
   public Logger getLogger() {
     return LOG;
   }
+
+  @Override
+  public String getComponentName() {
+    return COMPONENT_NAME;
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index 8ee0019..1b04356 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -839,4 +839,8 @@
                 Client.getRpcTimeout(conf)));
     return scmSecurityClient;
   }
+
+  public String getComponentName() {
+    return null;
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
index 2e1b204..0c7054a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java
@@ -124,4 +124,9 @@
   public Logger getLogger() {
     return LOG;
   }
+
+  @Override
+  public String getComponentName() {
+    return COMPONENT_NAME;
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
new file mode 100644
index 0000000..d1f9040
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/SCMCertificateClient.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.x509.certificate.client;
+
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
+import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.file.Paths;
+
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE;
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT;
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER;
+import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS;
+
+/**
+ * SCM Certificate Client which is used for generating public/private Key pair,
+ * generate CSR and finally obtain signed certificate. This Certificate
+ * client is used for setting up sub CA by SCM.
+ */
+public class SCMCertificateClient extends DefaultCertificateClient {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMCertificateClient.class);
+
+  public static final String COMPONENT_NAME =
+      Paths.get(OzoneConsts.SCM_CA_CERT_STORAGE_DIR,
+          OzoneConsts.SCM_SUB_CA_PATH).toString();
+
+  public SCMCertificateClient(SecurityConfig securityConfig,
+      String certSerialId) {
+    super(securityConfig, LOG, certSerialId, COMPONENT_NAME);
+  }
+
+  public SCMCertificateClient(SecurityConfig securityConfig) {
+    super(securityConfig, LOG, null, COMPONENT_NAME);
+  }
+
+  @Override
+  protected InitResponse handleCase(InitCase init)
+      throws CertificateException {
+    // This is similar to OM.
+    switch (init) {
+    case NONE:
+      LOG.info("Creating keypair for client as keypair and certificate not " +
+          "found.");
+      bootstrapClientKeys();
+      return GETCERT;
+    case CERT:
+      LOG.error("Private key not found, while certificate is still present." +
+          "Delete keypair and try again.");
+      return FAILURE;
+    case PUBLIC_KEY:
+      LOG.error("Found public key but private key and certificate missing.");
+      return FAILURE;
+    case PRIVATE_KEY:
+      LOG.info("Found private key but public key and certificate is missing.");
+      // TODO: Recovering public key from private might be possible in some
+      //  cases.
+      return FAILURE;
+    case PUBLICKEY_CERT:
+      LOG.error("Found public key and certificate but private key is " +
+          "missing.");
+      return FAILURE;
+    case PRIVATEKEY_CERT:
+      LOG.info("Found private key and certificate but public key missing.");
+      if (recoverPublicKey()) {
+        return SUCCESS;
+      } else {
+        LOG.error("Public key recovery failed.");
+        return FAILURE;
+      }
+    case PUBLICKEY_PRIVATEKEY:
+      LOG.info("Found private and public key but certificate is missing.");
+      if (validateKeyPair(getPublicKey())) {
+        return RECOVER;
+      } else {
+        LOG.error("Keypair validation failed.");
+        return FAILURE;
+      }
+    case ALL:
+      LOG.info("Found certificate file along with KeyPair.");
+      if (validateKeyPairAndCertificate()) {
+        return SUCCESS;
+      } else {
+        return FAILURE;
+      }
+    default:
+      LOG.error("Unexpected case: {} (private/public/cert)",
+          Integer.toBinaryString(init.ordinal()));
+      return FAILURE;
+    }
+  }
+
+  /**
+   * Returns a CSR builder that can be used to creates a Certificate signing
+   * request.
+   *
+   * @return CertificateSignRequest.Builder
+   */
+  @Override
+  public CertificateSignRequest.Builder getCSRBuilder()
+      throws CertificateException {
+    return super.getCSRBuilder()
+        .setDigitalEncryption(true)
+        .setDigitalSignature(true)
+        // Set CA to true, as this will be used to sign certs for OM/DN.
+        .setCA(true);
+  }
+
+
+  @Override
+  public Logger getLogger() {
+    return LOG;
+  }
+
+  @Override
+  public String getComponentName() {
+    return COMPONENT_NAME;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
index b26ad2c..b8d2859 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
@@ -59,6 +59,8 @@
 import org.bouncycastle.util.io.pem.PemObject;
 import org.bouncycastle.util.io.pem.PemReader;
 
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.INVALID_CSR;
+
 /**
  * A certificate sign request object that wraps operations to build a
  * PKCS10CertificationRequest to CertificateServer.
@@ -134,7 +136,8 @@
     try (PemReader reader = new PemReader(new StringReader(csr))) {
       PemObject pemObject = reader.readPemObject();
       if(pemObject.getContent() == null) {
-        throw new SCMSecurityException("Invalid Certificate signing request");
+        throw new SCMSecurityException("Invalid Certificate signing request",
+            INVALID_CSR);
       }
       return new PKCS10CertificationRequest(pemObject.getContent());
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBStoreHAManager.java
similarity index 67%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBStoreHAManager.java
index 4944017..85851ea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBStoreHAManager.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,17 +6,26 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
+package org.apache.hadoop.hdds.utils;
+
+import org.apache.hadoop.hdds.utils.db.Table;
 
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ * Interface defined for getting HA related specific info from DB for SCM and
+ * OM.
  */
+public interface DBStoreHAManager {
+
+  default Table<String, TransactionInfo> getTransactionInfoTable() {
+    return null;
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
new file mode 100644
index 0000000..6bf85ca
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.utils;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.db.DBDefinition;
+import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import org.apache.ratis.util.ExitUtils;
+import org.apache.ratis.util.FileUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER;
+import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
+
+/**
+ * utility class used by SCM and OM for HA.
+ */
+public final class HAUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(HAUtils.class);
+
+  private HAUtils() {
+  }
+
+  public static ScmInfo getScmInfo(OzoneConfiguration conf)
+      throws IOException {
+    try {
+      return getScmBlockClient(conf).getScmInfo();
+    } catch (IOException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new IOException("Failed to get SCM info", e);
+    }
+  }
+
+  /**
+   * Add SCM to the cluster.
+   * @param conf - OzoneConfiguration
+   * @param request - AddSCMRequest which has details of SCM to be added.
+   * @param selfId - Node Id of the SCM which is submitting the request to
+   * add SCM.
+   * @return true - if SCM node is added successfully, else false.
+   * @throws IOException
+   */
+  public static boolean addSCM(OzoneConfiguration conf, AddSCMRequest request,
+      String selfId) throws IOException {
+    OzoneConfiguration config = SCMHAUtils.removeSelfId(conf, selfId);
+    try {
+      return getScmBlockClient(config).addSCM(request);
+    } catch (IOException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new IOException("Failed to add SCM", e);
+    }
+  }
+
+  /**
+   * Create a scm block client.
+   *
+   * @return {@link ScmBlockLocationProtocol}
+   * @throws IOException
+   */
+  public static ScmBlockLocationProtocol getScmBlockClient(
+      OzoneConfiguration conf) throws IOException {
+    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
+        new ScmBlockLocationProtocolClientSideTranslatorPB(
+            new SCMBlockLocationFailoverProxyProvider(conf));
+    return TracingUtil
+        .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
+            conf);
+  }
+
+  /**
+   * Replace the current DB with the new DB checkpoint.
+   *
+   * @param lastAppliedIndex the last applied index in the current SCM DB.
+   * @param checkpointPath   path to the new DB checkpoint
+   * @return location of backup of the original DB
+   * @throws Exception
+   */
+  public static File replaceDBWithCheckpoint(long lastAppliedIndex,
+      File oldDB, Path checkpointPath, String dbPrefix) throws IOException {
+
+    // Take a backup of the current DB
+    String dbBackupName =
+        dbPrefix + lastAppliedIndex + "_" + System
+            .currentTimeMillis();
+    File dbDir = oldDB.getParentFile();
+    File dbBackup = new File(dbDir, dbBackupName);
+
+    try {
+      Files.move(oldDB.toPath(), dbBackup.toPath());
+    } catch (IOException e) {
+      LOG.error("Failed to create a backup of the current DB. Aborting "
+          + "snapshot installation.");
+      throw e;
+    }
+
+    // Move the new DB checkpoint into the metadata dir
+    Path markerFile = new File(dbDir, DB_TRANSIENT_MARKER).toPath();
+    try {
+      // Create a Transient Marker file. This file will be deleted if the
+      // checkpoint DB is successfully moved to the old DB location or if the
+      // old DB backup is reset to its location. If not, then the DB is in
+      // an inconsistent state and this marker file will fail it from
+      // starting up.
+      Files.createFile(markerFile);
+      Files.move(checkpointPath, oldDB.toPath());
+      Files.deleteIfExists(markerFile);
+    } catch (IOException e) {
+      LOG.error("Failed to move downloaded DB checkpoint {} to metadata "
+              + "directory {}. Resetting to original DB.", checkpointPath,
+          oldDB.toPath());
+      try {
+        Files.move(dbBackup.toPath(), oldDB.toPath());
+        Files.deleteIfExists(markerFile);
+      } catch (IOException ex) {
+        String errorMsg = "Failed to reset to original DB. SCM is in an "
+            + "inconsistent state.";
+        ExitUtils.terminate(1, errorMsg, ex, LOG);
+      }
+      throw e;
+    }
+    return dbBackup;
+  }
+
+  /**
+   * Obtain SCMTransactionInfo from Checkpoint.
+   */
+  public static TransactionInfo getTrxnInfoFromCheckpoint(
+      OzoneConfiguration conf, Path dbPath, DBDefinition definition)
+      throws Exception {
+
+    if (dbPath != null) {
+      Path dbDir = dbPath.getParent();
+      Path dbFile = dbPath.getFileName();
+      if (dbDir != null && dbFile != null) {
+        return getTransactionInfoFromDB(conf, dbDir, dbFile.toString(),
+            definition);
+      }
+    }
+
+    throw new IOException("Checkpoint " + dbPath + " does not have proper " +
+        "DB location");
+  }
+
+  /**
+   * Obtain Transaction info from DB.
+   * @param tempConfig
+   * @param dbDir path to DB
+   * @return TransactionInfo
+   * @throws Exception
+   */
+  private static TransactionInfo getTransactionInfoFromDB(
+      OzoneConfiguration tempConfig, Path dbDir, String dbName,
+      DBDefinition definition)
+      throws Exception {
+    DBStore dbStore = loadDB(tempConfig, dbDir.toFile(),
+        dbName, definition);
+
+    // Get the table name with TransactionInfo as the value. The transaction
+    // info table name are different in SCM and SCM.
+
+    // In case, a new table gets added where the value is TransactionInfo, this
+    // logic may not work.
+
+
+    Table<String, TransactionInfo> transactionInfoTable =
+        getTransactionInfoTable(dbStore, definition);
+
+    TransactionInfo transactionInfo =
+        transactionInfoTable.get(TRANSACTION_INFO_KEY);
+    dbStore.close();
+
+    if (transactionInfo == null) {
+      throw new IOException("Failed to read TransactionInfo from DB " +
+          definition.getName() + " at " + dbDir);
+    }
+    return transactionInfo;
+  }
+
+  public static Table<String, TransactionInfo> getTransactionInfoTable(
+      DBStore dbStore, DBDefinition definition) throws IOException {
+    return Arrays.stream(definition.getColumnFamilies())
+        .filter(t -> t.getValueType() == TransactionInfo.class).findFirst()
+        .get().getTable(dbStore);
+  }
+
+  /**
+   * Verify transaction info with provided lastAppliedIndex.
+   *
+   * If transaction info transaction Index is less than or equal to
+   * lastAppliedIndex, return false, else return true.
+   * @param transactionInfo
+   * @param lastAppliedIndex
+   * @param leaderId
+   * @param newDBlocation
+   * @return boolean
+   */
+  public static boolean verifyTransactionInfo(TransactionInfo transactionInfo,
+      long lastAppliedIndex, String leaderId, Path newDBlocation,
+      Logger logger) {
+    if (transactionInfo.getTransactionIndex() <= lastAppliedIndex) {
+      logger.error("Failed to install checkpoint from SCM leader: {}"
+              + ". The last applied index: {} is greater than or equal to the "
+              + "checkpoint's applied index: {}. Deleting the downloaded "
+              + "checkpoint {}", leaderId, lastAppliedIndex,
+          transactionInfo.getTransactionIndex(), newDBlocation);
+      try {
+        FileUtils.deleteFully(newDBlocation);
+      } catch (IOException e) {
+        logger.error("Failed to fully delete the downloaded DB "
+            + "checkpoint {} from SCM leader {}.", newDBlocation, leaderId, e);
+      }
+      return false;
+    }
+    return true;
+  }
+
+  public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
+      String dbName, DBDefinition definition) throws IOException {
+    RocksDBConfiguration rocksDBConfiguration =
+        configuration.getObject(RocksDBConfiguration.class);
+    DBStoreBuilder dbStoreBuilder =
+        DBStoreBuilder.newBuilder(configuration, rocksDBConfiguration)
+            .setName(dbName).setPath(Paths.get(metaDir.getPath()));
+    // Add column family names and codecs.
+    for (DBColumnFamilyDefinition columnFamily : definition
+        .getColumnFamilies()) {
+
+      dbStoreBuilder.addTable(columnFamily.getName());
+      dbStoreBuilder
+          .addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec());
+      dbStoreBuilder
+          .addCodec(columnFamily.getValueType(), columnFamily.getValueCodec());
+    }
+    return dbStoreBuilder.build();
+  }
+
+  public static File getMetaDir(DBDefinition definition,
+      OzoneConfiguration configuration) {
+    // Set metadata dirs.
+    File metadataDir = definition.getDBLocation(configuration);
+
+    if (metadataDir == null) {
+      LOG.warn("{} is not configured. We recommend adding this setting. "
+              + "Falling back to {} instead.",
+          definition.getLocationConfigKey(), HddsConfigKeys.
+              OZONE_METADATA_DIRS);
+      metadataDir = getOzoneMetaDirPath(configuration);
+    }
+    return metadataDir;
+  }
+
+  /**
+   * Unwrap exception to check if it is some kind of access control problem.
+   * {@link AccessControlException}
+   */
+  public static boolean isAccessControlException(Exception ex) {
+    if (ex instanceof ServiceException) {
+      Throwable t = ex.getCause();
+      if (t instanceof RemoteException) {
+        t = ((RemoteException) t).unwrapRemoteException();
+      }
+      while (t != null) {
+        if (t instanceof AccessControlException) {
+          return true;
+        }
+        t = t.getCause();
+      }
+    }
+    return false;
+  }
+
+  public static void checkSecurityAndSCMHAEnabled(OzoneConfiguration conf) {
+    boolean enable =
+        conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_SECURITY_SUPPORTED,
+            ScmConfigKeys.OZONE_SCM_HA_SECURITY_SUPPORTED_DEFAULT);
+    if (OzoneSecurityUtil.isSecurityEnabled(conf) && !enable) {
+      List<SCMNodeInfo> scmNodeInfo = SCMNodeInfo.buildNodeInfo(conf);
+      if (scmNodeInfo.size() > 1) {
+        System.err.println("Ozone Services cannot be started on a secure SCM " +
+            "HA enabled cluster");
+        System.exit(1);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 2c2b5fc..ddc7e04 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -18,30 +18,41 @@
 package org.apache.hadoop.hdds.utils;
 
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import com.google.protobuf.BlockingService;
+import org.apache.commons.compress.archivers.ArchiveEntry;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.compressors.CompressorException;
+import org.apache.commons.compress.compressors.CompressorOutputStream;
+import org.apache.commons.compress.compressors.CompressorStreamFactory;
+import org.apache.commons.compress.utils.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
 import org.apache.hadoop.hdds.recon.ReconConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMSecurityProtocolFailoverProxyProvider;
 import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.MetricsException;
@@ -147,7 +158,8 @@
 
     final int port = getPortNumberFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY)
-        .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT);
+        .orElse(conf.getInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY,
+            ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
 
     return NetUtils.createSocketAddr(host + ":" + port);
   }
@@ -167,7 +179,8 @@
 
     final int port = getPortNumberFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY)
-        .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+        .orElse(conf.getInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
 
     return NetUtils.createSocketAddr(host + ":" + port);
   }
@@ -419,20 +432,9 @@
    */
   public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
       OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress address =
-        getScmAddressForSecurityProtocol(conf);
-    RetryPolicy retryPolicy =
-        RetryPolicies.retryForeverWithFixedSleep(
-            1000, TimeUnit.MILLISECONDS);
     return new SCMSecurityProtocolClientSideTranslatorPB(
-        RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion,
-            address, UserGroupInformation.getCurrentUser(),
-            conf, NetUtils.getDefaultSocketFactory(conf),
-            Client.getRpcTimeout(conf), retryPolicy).getProxy());
+        new SCMSecurityProtocolFailoverProxyProvider(conf,
+            UserGroupInformation.getCurrentUser()));
   }
 
 
@@ -473,17 +475,11 @@
    */
   public static SCMSecurityProtocol getScmSecurityClient(
       OzoneConfiguration conf, UserGroupInformation ugi) throws IOException {
-    RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmSecurityProtoAdd =
-        getScmAddressForSecurityProtocol(conf);
-    return new SCMSecurityProtocolClientSideTranslatorPB(
-        RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion,
-            scmSecurityProtoAdd, ugi, conf,
-            NetUtils.getDefaultSocketFactory(conf),
-            Client.getRpcTimeout(conf)));
+    SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient =
+        new SCMSecurityProtocolClientSideTranslatorPB(
+            new SCMSecurityProtocolFailoverProxyProvider(conf, ugi));
+    return TracingUtil.createProxy(scmSecurityClient,
+        SCMSecurityProtocol.class, conf);
   }
 
   /**
@@ -505,6 +501,51 @@
   }
 
   /**
+   * Write DB Checkpoint to an output stream as a compressed file (tgz).
+   *
+   * @param checkpoint  checkpoint file
+   * @param destination destination output stream.
+   * @throws IOException
+   */
+  public static void writeDBCheckpointToStream(DBCheckpoint checkpoint,
+      OutputStream destination)
+      throws IOException {
+    try (CompressorOutputStream gzippedOut = new CompressorStreamFactory()
+        .createCompressorOutputStream(CompressorStreamFactory.GZIP,
+            destination);
+        ArchiveOutputStream archiveOutputStream =
+            new TarArchiveOutputStream(gzippedOut);
+        Stream<Path> files =
+            Files.list(checkpoint.getCheckpointLocation())) {
+      for (Path path : files.collect(Collectors.toList())) {
+        if (path != null) {
+          Path fileName = path.getFileName();
+          if (fileName != null) {
+            includeFile(path.toFile(), fileName.toString(),
+                archiveOutputStream);
+          }
+        }
+      }
+    } catch (CompressorException e) {
+      throw new IOException(
+          "Can't compress the checkpoint: " +
+              checkpoint.getCheckpointLocation(), e);
+    }
+  }
+
+  private static void includeFile(File file, String entryName,
+      ArchiveOutputStream archiveOutputStream)
+      throws IOException {
+    ArchiveEntry archiveEntry =
+        archiveOutputStream.createArchiveEntry(file, entryName);
+    archiveOutputStream.putArchiveEntry(archiveEntry);
+    try (FileInputStream fis = new FileInputStream(file)) {
+      IOUtils.copy(fis, archiveOutputStream);
+    }
+    archiveOutputStream.closeArchiveEntry();
+  }
+
+  /**
    * Converts RocksDB exception to IOE.
    * @param msg  - Message to add to exception.
    * @param e - Original Exception.
@@ -519,5 +560,4 @@
         + "; message : " + errMessage;
     return new IOException(output, e);
   }
-
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
similarity index 73%
rename from hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java
rename to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
index 6c4b1b6..ec4c0e1 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
@@ -16,23 +16,25 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.om.ratis;
+package org.apache.hadoop.hdds.utils;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.StringUtils;
 
 import java.io.IOException;
 import java.util.Objects;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
+
+import org.apache.hadoop.ozone.common.ha.ratis.RatisSnapshotInfo;
 import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.ratis.statemachine.SnapshotInfo;
 
 import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_SPLIT_KEY;
 
 /**
- * TransactionInfo which is persisted to OM DB.
+ * TransactionInfo which is persisted to DB.
  */
-public final class OMTransactionInfo {
+public final class TransactionInfo {
 
   // Term associated with Ratis Log index in Ratis enabled cluster. In
   // non-Ratis cluster, term is set to -1.
@@ -42,7 +44,7 @@
   // non-Ratis cluster
   private long transactionIndex;
 
-  private OMTransactionInfo(String transactionInfo) {
+  private TransactionInfo(String transactionInfo) {
     String[] tInfo =
         transactionInfo.split(TRANSACTION_INFO_SPLIT_KEY);
     Preconditions.checkState(tInfo.length==2,
@@ -52,11 +54,23 @@
     transactionIndex = Long.parseLong(tInfo[1]);
   }
 
-  private OMTransactionInfo(long currentTerm, long transactionIndex) {
+  private TransactionInfo(long currentTerm, long transactionIndex) {
     this.term = currentTerm;
     this.transactionIndex = transactionIndex;
   }
 
+  public boolean isDefault() {
+    return transactionIndex == -1 && term == 0;
+  }
+
+  public int compareTo(TransactionInfo info) {
+    if (info.getTerm() == this.getTerm()) {
+      return (int)(this.getTransactionIndex() - info.getTransactionIndex());
+    } else {
+      return (int)(this.getTerm() - info.getTerm());
+    }
+  }
+
   /**
    * Get current term.
    * @return currentTerm
@@ -104,9 +118,9 @@
    * @param bytes
    * @return OMTransactionInfo
    */
-  public static OMTransactionInfo getFromByteArray(byte[] bytes) {
+  public static TransactionInfo getFromByteArray(byte[] bytes) {
     String tInfo = StringUtils.bytes2String(bytes);
-    return new OMTransactionInfo(tInfo);
+    return new TransactionInfo(tInfo);
   }
 
   @Override
@@ -117,11 +131,16 @@
     if (o == null || getClass() != o.getClass()) {
       return false;
     }
-    OMTransactionInfo that = (OMTransactionInfo) o;
+    TransactionInfo that = (TransactionInfo) o;
     return term == that.term &&
         transactionIndex == that.transactionIndex;
   }
 
+  public static TransactionInfo fromTermIndex(TermIndex termIndex) {
+    return new Builder().setCurrentTerm(termIndex.getTerm())
+        .setTransactionIndex(termIndex.getIndex()).build();
+  }
+
   @Override
   public int hashCode() {
     return Objects.hash(term, transactionIndex);
@@ -138,12 +157,20 @@
    * @return
    * @throws IOException
    */
-  public static OMTransactionInfo readTransactionInfo(
-      OMMetadataManager metadataManager) throws IOException {
+  public static TransactionInfo readTransactionInfo(
+      DBStoreHAManager metadataManager) throws IOException {
     return metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
   }
+
+  public SnapshotInfo toSnapshotInfo() {
+    return new RatisSnapshotInfo(term, transactionIndex);
+  }
+
+  public static Builder builder() {
+    return new Builder();
+  }
   /**
-   * Builder to build {@link OMTransactionInfo}.
+   * Builder to build {@link TransactionInfo}.
    */
   public static class Builder {
     private long currentTerm = 0;
@@ -159,9 +186,8 @@
       return this;
     }
 
-    public OMTransactionInfo build() {
-      return new OMTransactionInfo(currentTerm, transactionIndex);
+    public TransactionInfo build() {
+      return new TransactionInfo(currentTerm, transactionIndex);
     }
-
   }
 }
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OMTransactionInfoCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfoCodec.java
similarity index 69%
rename from hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OMTransactionInfoCodec.java
rename to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfoCodec.java
index 2cca220..86aa373 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OMTransactionInfoCodec.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfoCodec.java
@@ -15,36 +15,35 @@
  * the License.
  */
 
-package org.apache.hadoop.ozone.om.codec;
+package org.apache.hadoop.hdds.utils;
 
 import org.apache.hadoop.hdds.utils.db.Codec;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
 
 import java.io.IOException;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
 /**
- * Codec to convert {@link OMTransactionInfo} to byte array and from byte array
- * to {@link OMTransactionInfo}.
+ * Codec to convert {@link TransactionInfo} to byte array and from byte array
+ * to {@link TransactionInfo}.
  */
-public class OMTransactionInfoCodec implements Codec<OMTransactionInfo> {
+public class TransactionInfoCodec implements Codec<TransactionInfo> {
   @Override
-  public byte[] toPersistedFormat(OMTransactionInfo object) throws IOException {
+  public byte[] toPersistedFormat(TransactionInfo object) throws IOException {
     checkNotNull(object, "Null object can't be converted to byte array.");
     return object.convertToByteArray();
   }
 
   @Override
-  public OMTransactionInfo fromPersistedFormat(byte[] rawData)
+  public TransactionInfo fromPersistedFormat(byte[] rawData)
       throws IOException {
     checkNotNull(rawData, "Null byte array can't be converted to " +
         "real object.");
-    return OMTransactionInfo.getFromByteArray(rawData);
+    return TransactionInfo.getFromByteArray(rawData);
   }
 
   @Override
-  public OMTransactionInfo copyObject(OMTransactionInfo object) {
+  public TransactionInfo copyObject(TransactionInfo object) {
     return object;
   }
 }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
index de2b87f..c60c975 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java
@@ -19,9 +19,10 @@
 
 package org.apache.hadoop.hdds.security.x509.certificate.authority;
 
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.bouncycastle.asn1.x509.CRLReason;
 import org.bouncycastle.cert.X509CertificateHolder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import java.io.IOException;
 import java.math.BigInteger;
 import java.security.cert.X509Certificate;
@@ -36,12 +37,17 @@
 public class MockCAStore implements CertificateStore {
   @Override
   public void storeValidCertificate(BigInteger serialID,
-                                    X509Certificate certificate)
+      X509Certificate certificate, NodeType role)
       throws IOException {
 
   }
 
   @Override
+  public void checkValidCertID(BigInteger serialID) throws IOException {
+
+  }
+
+  @Override
   public Optional<Long> revokeCertificates(
       List<BigInteger> serialIDs,
       X509CertificateHolder caCertificateHolder,
@@ -65,9 +71,12 @@
   }
 
   @Override
-  public List<X509Certificate> listCertificate(HddsProtos.NodeType role,
+  public List<X509Certificate> listCertificate(NodeType role,
       BigInteger startSerialID, int count, CertType certType)
       throws IOException {
     return Collections.emptyList();
   }
+
+  @Override
+  public void reinitialize(SCMMetadataStore metadataStore) {}
 }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
index ca333ad..f5a2d8c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
@@ -23,6 +23,10 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
 import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
 import org.apache.hadoop.test.LambdaTestUtils;
@@ -31,6 +35,7 @@
 import org.bouncycastle.cert.X509CertificateHolder;
 import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
 import org.bouncycastle.pkcs.PKCS10CertificationRequest;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -38,6 +43,7 @@
 
 import java.io.IOException;
 import java.math.BigInteger;
+import java.nio.file.Paths;
 import java.security.KeyPair;
 import java.security.NoSuchAlgorithmException;
 import java.security.NoSuchProviderException;
@@ -54,11 +60,16 @@
 
 import static junit.framework.TestCase.assertTrue;
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.OM;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.SCM;
+import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer.CAType.INTERMEDIARY_CA;
+import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer.CAType.SELF_SIGNED_CA;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_CA_CERT_STORAGE_DIR;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_CA_PATH;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
 
-
 /**
  * Tests the Default CA Server.
  */
@@ -80,24 +91,16 @@
     SecurityConfig securityConfig = new SecurityConfig(conf);
     CertificateServer testCA = new DefaultCAServer("testCA",
         RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
-    testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA);
+        RandomStringUtils.randomAlphabetic(4), caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
+    testCA.init(securityConfig, SELF_SIGNED_CA);
     X509CertificateHolder first = testCA.getCACertificate();
     assertNotNull(first);
     //Init is idempotent.
-    testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA);
+    testCA.init(securityConfig, SELF_SIGNED_CA);
     X509CertificateHolder second = testCA.getCACertificate();
     assertEquals(first, second);
-
-    // we only support Self Signed CA for now.
-    try {
-      testCA.init(securityConfig, CertificateServer.CAType.INTERMEDIARY_CA);
-      fail("code should not reach here, exception should have been thrown.");
-    } catch (IllegalStateException e) {
-      // This is a run time exception, hence it is not caught by the junit
-      // expected Exception.
-      assertTrue(e.toString().contains("Not implemented"));
-    }
   }
 
   @Test
@@ -105,10 +108,12 @@
     SecurityConfig securityConfig = new SecurityConfig(conf);
     CertificateServer testCA = new DefaultCAServer("testCA",
         RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
+        RandomStringUtils.randomAlphabetic(4), caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
     Consumer<SecurityConfig> caInitializer =
         ((DefaultCAServer) testCA).processVerificationStatus(
-        DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE);
+        DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE, SELF_SIGNED_CA);
     try {
 
       caInitializer.accept(securityConfig);
@@ -125,10 +130,12 @@
     SecurityConfig securityConfig = new SecurityConfig(conf);
     CertificateServer testCA = new DefaultCAServer("testCA",
         RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
+        RandomStringUtils.randomAlphabetic(4), caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
     Consumer<SecurityConfig> caInitializer =
         ((DefaultCAServer) testCA).processVerificationStatus(
-            DefaultCAServer.VerificationStatus.MISSING_KEYS);
+            DefaultCAServer.VerificationStatus.MISSING_KEYS, SELF_SIGNED_CA);
     try {
 
       caInitializer.accept(securityConfig);
@@ -174,12 +181,14 @@
     String csrString = CertificateSignRequest.getEncodedString(csr);
 
     CertificateServer testCA = new DefaultCAServer("testCA",
-        clusterId, scmId, caStore);
+        clusterId, scmId, caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
     testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
+        SELF_SIGNED_CA);
 
     Future<X509CertificateHolder> holder = testCA.requestCertificate(csrString,
-        CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
+        CertificateApprover.ApprovalType.TESTING_AUTOMATIC, SCM);
     // Right now our calls are synchronous. Eventually this will have to wait.
     assertTrue(holder.isDone());
     assertNotNull(holder.get());
@@ -216,12 +225,14 @@
 
     CertificateServer testCA = new DefaultCAServer("testCA",
         RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
+        RandomStringUtils.randomAlphabetic(4), caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
     testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
+        SELF_SIGNED_CA);
 
     Future<X509CertificateHolder> holder = testCA.requestCertificate(csrString,
-        CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
+        CertificateApprover.ApprovalType.TESTING_AUTOMATIC, OM);
     // Right now our calls are synchronous. Eventually this will have to wait.
     assertTrue(holder.isDone());
     assertNotNull(holder.get());
@@ -234,9 +245,11 @@
     Date now = new Date();
 
     CertificateServer testCA = new DefaultCAServer("testCA",
-        clusterId, scmId, caStore);
+        clusterId, scmId, caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
     testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
+        SELF_SIGNED_CA);
 
     KeyPair keyPair =
         new HDDSKeyGenerator(conf).generateKey();
@@ -253,7 +266,7 @@
     String csrString = CertificateSignRequest.getEncodedString(csr);
 
     Future<X509CertificateHolder> holder = testCA.requestCertificate(csrString,
-        CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
+        CertificateApprover.ApprovalType.TESTING_AUTOMATIC, OM);
 
     X509Certificate certificate =
         new JcaX509CertificateConverter().getCertificate(holder.get());
@@ -300,19 +313,99 @@
 
     CertificateServer testCA = new DefaultCAServer("testCA",
         RandomStringUtils.randomAlphabetic(4),
-        RandomStringUtils.randomAlphabetic(4), caStore);
+        RandomStringUtils.randomAlphabetic(4), caStore,
+        new DefaultProfile(),
+        Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString());
     testCA.init(new SecurityConfig(conf),
-        CertificateServer.CAType.SELF_SIGNED_CA);
+        SELF_SIGNED_CA);
 
     LambdaTestUtils.intercept(ExecutionException.class, "ScmId and " +
             "ClusterId in CSR subject are incorrect",
         () -> {
           Future<X509CertificateHolder> holder =
               testCA.requestCertificate(csrString,
-                  CertificateApprover.ApprovalType.TESTING_AUTOMATIC);
+                  CertificateApprover.ApprovalType.TESTING_AUTOMATIC, OM);
           holder.isDone();
           holder.get();
         });
   }
 
+  @Test(expected = IllegalStateException.class)
+  public void testIntermediaryCAWithEmpty()
+      throws Exception {
+
+    CertificateServer scmCA = new DefaultCAServer("testCA",
+        RandomStringUtils.randomAlphabetic(4),
+        RandomStringUtils.randomAlphabetic(4), caStore,
+        new DefaultProfile(), Paths.get("scm").toString());
+
+    scmCA.init(new SecurityConfig(conf), INTERMEDIARY_CA);
+  }
+
+  @Test
+  public void testIntermediaryCA() throws Exception {
+
+    String clusterId = RandomStringUtils.randomAlphanumeric(4);
+    String scmId = RandomStringUtils.randomAlphanumeric(4);
+
+    CertificateServer rootCA = new DefaultCAServer("rootCA",
+        clusterId, scmId, caStore, new DefaultProfile(),
+        Paths.get("scm", "ca").toString());
+
+    rootCA.init(new SecurityConfig(conf), SELF_SIGNED_CA);
+
+
+    SCMCertificateClient scmCertificateClient =
+        new SCMCertificateClient(new SecurityConfig(conf));
+
+    CertificateClient.InitResponse response = scmCertificateClient.init();
+    Assert.assertEquals(CertificateClient.InitResponse.GETCERT, response);
+
+    // Generate cert
+    KeyPair keyPair =
+        new HDDSKeyGenerator(conf).generateKey();
+    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
+        .addDnsName("hadoop.apache.org")
+        .addIpAddress("8.8.8.8")
+        .setCA(false)
+        .setSubject("testCA")
+        .setConfiguration(conf)
+        .setKey(keyPair)
+        .build();
+
+    Future<X509CertificateHolder> holder = rootCA.requestCertificate(csr,
+        CertificateApprover.ApprovalType.TESTING_AUTOMATIC, SCM);
+    Assert.assertTrue(holder.isDone());
+
+    X509CertificateHolder certificateHolder = holder.get();
+    Assert.assertNotNull(certificateHolder);
+
+    X509CertificateHolder rootCertHolder = rootCA.getCACertificate();
+
+    scmCertificateClient.storeCertificate(
+        CertificateCodec.getPEMEncodedString(rootCertHolder), true, true);
+
+    // Write to the location where Default CA Server reads from.
+    scmCertificateClient.storeCertificate(
+        CertificateCodec.getPEMEncodedString(certificateHolder), true);
+
+    CertificateCodec certCodec =
+        new CertificateCodec(new SecurityConfig(conf),
+            scmCertificateClient.getComponentName());
+    certCodec.writeCertificate(certificateHolder);
+
+    // The certificate generated by above cert client will be used by scmCA.
+    // Now scmCA init should be successful.
+    CertificateServer scmCA = new DefaultCAServer("scmCA",
+        clusterId, scmId, caStore, new DefaultProfile(),
+        scmCertificateClient.getComponentName());
+
+    try {
+      scmCA.init(new SecurityConfig(conf), INTERMEDIARY_CA);
+    } catch (Exception e) {
+      fail("testIntermediaryCA failed during init");
+    }
+
+  }
+
 }
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index a5a6287..ed613c8 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -109,6 +109,7 @@
     OK = 1;
     CONTAINER_ALREADY_EXISTS = 2;
     CONTAINER_IS_MISSING = 3;
+    SCM_NOT_LEADER = 4;
   }
 }
 
@@ -160,6 +161,7 @@
     success = 1;
     errorContainerAlreadyExists = 2;
     errorContainerMissing = 3;
+    scmNotLeader = 4;
   }
   required Error errorCode = 1;
   required ContainerWithPipeline containerWithPipeline = 2;
diff --git a/hadoop-hdds/interface-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/interface-client/dev-support/findbugsExcludeFile.xml
index ba54a4f..18cdb6c 100644
--- a/hadoop-hdds/interface-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/interface-client/dev-support/findbugsExcludeFile.xml
@@ -21,4 +21,7 @@
   <Match>
     <Package name="org.apache.hadoop.hdds.protocol.proto"/>
   </Match>
+  <Match>
+    <Package name="org.apache.hadoop.hdds.protocol.scm.proto"/>
+  </Match>
 </FindBugsFilter>
diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml
index eca7067..c72d45a 100644
--- a/hadoop-hdds/interface-client/pom.xml
+++ b/hadoop-hdds/interface-client/pom.xml
@@ -71,6 +71,7 @@
               <protoSourceRoot>${basedir}/src/main/proto/</protoSourceRoot>
               <includes>
                 <include>DatanodeClientProtocol.proto</include>
+                <include>InterSCMProtocol.proto</include>
               </includes>
               <outputDirectory>target/generated-sources/java</outputDirectory>
               <clearOutputDirectory>false</clearOutputDirectory>
@@ -119,6 +120,18 @@
                          value="org.apache.ratis.thirdparty.com.google.common"
                          dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/datanode/proto">
                 </replace>
+                <replace token="com.google.protobuf"
+                         value="org.apache.ratis.thirdparty.com.google.protobuf"
+                         dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/scm/proto">
+                </replace>
+                <replace token="io.grpc"
+                         value="org.apache.ratis.thirdparty.io.grpc"
+                         dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/scm/proto">
+                </replace>
+                <replace token="com.google.common"
+                         value="org.apache.ratis.thirdparty.com.google.common"
+                         dir="target/generated-sources/java/org/apache/hadoop/hdds/protocol/scm/proto">
+                </replace>
               </tasks>
             </configuration>
             <goals>
diff --git a/hadoop-hdds/interface-client/src/main/proto/InterSCMProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/InterSCMProtocol.proto
new file mode 100644
index 0000000..8c726c6
--- /dev/null
+++ b/hadoop-hdds/interface-client/src/main/proto/InterSCMProtocol.proto
@@ -0,0 +1,46 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/**
+ * These .proto interfaces are private and unstable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *unstable* .proto interface.
+ */
+syntax = "proto2";
+option java_package = "org.apache.hadoop.hdds.protocol.scm.proto";
+option java_outer_classname = "InterSCMProtocolProtos";
+option java_generate_equals_and_hash = true;
+
+
+message CopyDBCheckpointRequestProto {
+  required bool flush = 1;
+}
+
+message CopyDBCheckpointResponseProto {
+  required string clusterId = 1;
+  required uint64 len = 2;
+  required bool eof = 3;
+  required bytes data = 4;
+  required uint64 readOffset = 6;
+  optional int64 checksum = 7;
+}
+
+service InterSCMProtocolService {
+  // An inter SCM service to copy SCM DB checkpoint from leader to follower
+  rpc download (CopyDBCheckpointRequestProto) returns (stream CopyDBCheckpointResponseProto);
+}
\ No newline at end of file
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index b43600c..bf7ceb7 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -73,6 +73,12 @@
     repeated Port ports = 4;
 }
 
+message ScmNodeDetailsProto {
+    required string scmNodeId = 1;     // SCM Node Id.
+    required string clusterId = 2;     // Cluster Id of SCM cluster.
+    required string hostName = 3;      // Hostname of SCM.
+}
+
 message Port {
     required string name = 1;
     required uint32 value = 2;
@@ -85,6 +91,10 @@
   optional UUID uuid128 = 100;
 }
 
+message ContainerID {
+    required uint64 id = 1;
+}
+
 enum PipelineState {
     PIPELINE_ALLOCATED = 1;
     PIPELINE_OPEN = 2;
@@ -197,6 +207,7 @@
 }
 
 message ContainerInfoProto {
+    // Replace int64 with ContainerID message
     required int64 containerID = 1;
     required LifeCycleState state = 2;
     optional PipelineID pipelineID = 3;
@@ -222,8 +233,19 @@
 message GetScmInfoResponseProto {
     required string clusterId = 1;
     required string scmId = 2;
+    repeated string peerRoles = 3;
 }
 
+message AddScmRequestProto {
+    required string clusterId = 1;
+    required string scmId = 2;
+    required string ratisAddr = 3;
+}
+
+message AddScmResponseProto {
+    required bool success = 1;
+    optional string scmId = 2;
+}
 
 enum ReplicationType {
     RATIS = 1;
@@ -252,6 +274,7 @@
 
 message ExcludeListProto {
     repeated string datanodes = 1;
+    // Replace int64 with ContainerID message
     repeated int64 containerIds = 2;
     repeated PipelineID pipelineIds = 3;
 }
@@ -260,6 +283,7 @@
  * Block ID that uniquely identify a block by SCM.
  */
 message ContainerBlockID {
+    // Replace int64 with ContainerID message
     required int64 containerID = 1;
     required int64 localID = 2;
 }
diff --git a/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto
new file mode 100644
index 0000000..b9d3941
--- /dev/null
+++ b/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_outer_classname = "SCMRatisProtocol";
+option java_generate_equals_and_hash = true;
+
+enum RequestType {
+    PIPELINE = 1;
+    CONTAINER = 2;
+    BLOCK = 3;
+    SEQUENCE_ID = 4;
+    CERT_STORE = 5;
+}
+
+message Method {
+    required string name = 1;
+    repeated MethodArgument args = 2;
+}
+
+message MethodArgument {
+    required string type = 1;
+    required bytes value = 2;
+}
+
+message ListArgument {
+    required string type = 1;
+    repeated bytes value = 2;
+}
+
+message SCMRatisRequestProto {
+    required RequestType type = 1;
+    required Method method = 2;
+}
+
+message SCMRatisResponseProto {
+    required string type = 2;
+    required bytes value = 3;
+}
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 116e35e..7f73a2a 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -305,6 +305,10 @@
   optional CreatePipelineCommandProto createPipelineCommandProto = 7;
   optional ClosePipelineCommandProto closePipelineCommandProto = 8;
   optional SetNodeOperationalStateCommandProto setNodeOperationalStateCommandProto = 9;
+
+  // If running upon Ratis, holds term of underlying RaftServer iff current
+  // SCM is a leader. If running without Ratis, holds SCMContext.INVALID_TERM.
+  optional int64 term = 15;
 }
 
 /**
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
index 5eac5be..1b73d6a 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
@@ -38,6 +38,7 @@
   DeleteScmKeyBlocks = 12;
   GetScmInfo         = 13;
   SortDatanodes      = 14;
+  AddScm             = 15;
 }
 
 message SCMBlockLocationRequest {
@@ -54,6 +55,7 @@
   optional DeleteScmKeyBlocksRequestProto     deleteScmKeyBlocksRequest = 12;
   optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest         = 13;
   optional SortDatanodesRequestProto          sortDatanodesRequest      = 14;
+  optional hadoop.hdds.AddScmRequestProto     addScmRequestProto       = 15;
 }
 
 message SCMBlockLocationResponse {
@@ -71,10 +73,13 @@
 
   optional string leaderOMNodeId = 6;
 
+  optional string leaderSCMNodeId = 7;
+
   optional AllocateScmBlockResponseProto       allocateScmBlockResponse   = 11;
   optional DeleteScmKeyBlocksResponseProto     deleteScmKeyBlocksResponse = 12;
   optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse         = 13;
   optional SortDatanodesResponseProto          sortDatanodesResponse      = 14;
+  optional hadoop.hdds.AddScmResponseProto     addScmResponse        = 15;
 }
 
 /**
@@ -117,6 +122,7 @@
   INTERNAL_ERROR = 29;
   FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY = 30;
   FAILED_TO_INIT_LEADER_CHOOSE_POLICY = 31;
+  SCM_NOT_LEADER = 32;
 }
 
 /**
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto
index 114d215..31aac90 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto
@@ -49,6 +49,8 @@
     optional SCMGetCertificateRequestProto getCertificateRequest = 5;
     optional SCMGetCACertificateRequestProto getCACertificateRequest = 6;
     optional SCMListCertificateRequestProto listCertificateRequest = 7;
+    optional SCMGetSCMCertRequestProto getSCMCertificateRequest = 8;
+    optional SCMListCACertificateRequestProto listCACertificateRequestProto = 9;
 
 }
 
@@ -77,10 +79,27 @@
     GetCertificate = 3;
     GetCACertificate = 4;
     ListCertificate = 5;
+    GetSCMCertificate = 6;
+    GetRootCACertificate = 7;
+    ListCACertificate = 8;
 }
 
 enum Status {
     OK = 1;
+    INVALID_CSR = 2;
+    UNABLE_TO_ISSUE_CERTIFICATE = 3;
+    GET_DN_CERTIFICATE_FAILED = 4;
+    GET_OM_CERTIFICATE_FAILED = 5;
+    GET_SCM_CERTIFICATE_FAILED = 6;
+    GET_CERTIFICATE_FAILED = 7;
+    GET_CA_CERT_FAILED = 8;
+    CERTIFICATE_NOT_FOUND = 9;
+    PEM_ENCODE_FAILED = 10;
+    INTERNAL_ERROR = 11;
+    DEFAULT = 12;
+    MISSING_BLOCK_TOKEN = 13;
+    BLOCK_TOKEN_VERIFICATION_FAILED = 14;
+    GET_ROOT_CA_CERTIFICATE_FAILED = 15;
 }
 /**
 * This message is send by data node to prove its identity and get an SCM
@@ -100,6 +119,11 @@
     required string CSR = 2;
 }
 
+message SCMGetSCMCertRequestProto {
+    required ScmNodeDetailsProto scmDetails = 1;
+    required string CSR = 2;
+}
+
 /**
 * Proto request to get a certificate with given serial id.
 */
@@ -135,6 +159,8 @@
     required ResponseCode responseCode = 1;
     required string x509Certificate = 2; // Base64 encoded X509 certificate.
     optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate.
+    // Base64 encoded Root CA X509 certificate.
+    optional string x509RootCACertificate = 4;
 }
 
 /**
@@ -149,6 +175,11 @@
     repeated string certificates = 2;
 }
 
+message SCMGetRootCACertificateRequestProto {
+}
+
+message SCMListCACertificateRequestProto {
+}
 
 service SCMSecurityProtocolService {
     rpc submitRequest (SCMSecurityRequest) returns (SCMSecurityResponse);
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 3a81a95..ad0c445 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -126,6 +126,12 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-common</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdds-server-scm</artifactId>
         <version>${hdds.version}</version>
       </dependency>
diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
index 55b900b..50f3491 100644
--- a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml
@@ -15,6 +15,9 @@
    limitations under the License.
 -->
 <FindBugsFilter>
+  <Match>
+    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
+  </Match>
   <!-- Test -->
   <Match>
     <Class name="org.apache.hadoop.hdds.scm.TestHddsServerUtil" />
@@ -44,4 +47,8 @@
     <Class name="org.apache.hadoop.hdds.scm.server.TestSCMSecurityProtocolServer" />
     <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.hdds.scm.metadata.TestSCMTransactionInfoCodec"/>
+    <Bug pattern="NP_NULL_PARAM_DEREF_ALL_TARGETS_DANGEROUS" />
+  </Match>
 </FindBugsFilter>
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 4e1d481..9aeb67c 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -50,6 +50,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-hadoop-dependency-server</artifactId>
     </dependency>
     <dependency>
@@ -135,6 +139,11 @@
       <artifactId>hadoop-hdds-hadoop-dependency-test</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
   <build>
     <plugins>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
index 426341a..cada48c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
@@ -18,13 +18,39 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.safemode.Precheck;
 
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.Optional;
+import java.util.OptionalInt;
+
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+
 /**
  * SCM utility class.
  */
@@ -48,4 +74,88 @@
     }
   }
 
+  /**
+   * Create SCM directory file based on given path.
+   */
+  public static File createSCMDir(String dirPath) {
+    File dirFile = new File(dirPath);
+    if (!dirFile.mkdirs() && !dirFile.exists()) {
+      throw new IllegalArgumentException("Unable to create path: " + dirFile);
+    }
+    return dirFile;
+  }
+
+  public static InetSocketAddress getScmBlockProtocolServerAddress(
+      OzoneConfiguration conf, String localScmServiceId, String nodeId) {
+    String bindHostKey = ConfUtils.addKeySuffixes(
+        OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY, localScmServiceId, nodeId);
+    final Optional<String> host = getHostNameFromConfigKeys(conf, bindHostKey);
+
+    String addressKey = ConfUtils.addKeySuffixes(
+        OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, localScmServiceId, nodeId);
+    final OptionalInt port = getPortNumberFromConfigKeys(conf, addressKey);
+
+    return NetUtils.createSocketAddr(
+        host.orElse(
+            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT) + ":" +
+            port.orElse(conf.getInt(ConfUtils.addKeySuffixes(
+                OZONE_SCM_BLOCK_CLIENT_PORT_KEY, localScmServiceId, nodeId),
+                OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT)));
+  }
+
+  public static String getScmBlockProtocolServerAddressKey(
+      String serviceId, String nodeId) {
+    return ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+        serviceId, nodeId);
+  }
+
+  public static InetSocketAddress getClientProtocolServerAddress(
+      OzoneConfiguration conf, String localScmServiceId, String nodeId) {
+    String bindHostKey = ConfUtils.addKeySuffixes(
+        OZONE_SCM_CLIENT_BIND_HOST_KEY, localScmServiceId, nodeId);
+
+    final String host = getHostNameFromConfigKeys(conf, bindHostKey)
+        .orElse(OZONE_SCM_CLIENT_BIND_HOST_DEFAULT);
+
+    String addressKey = ConfUtils.addKeySuffixes(
+        OZONE_SCM_CLIENT_ADDRESS_KEY, localScmServiceId, nodeId);
+
+    final int port = getPortNumberFromConfigKeys(conf, addressKey)
+        .orElse(conf.getInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY,
+            localScmServiceId, nodeId), OZONE_SCM_CLIENT_PORT_DEFAULT));
+
+    return NetUtils.createSocketAddr(host + ":" + port);
+  }
+
+  public static String getClientProtocolServerAddressKey(
+      String serviceId, String nodeId) {
+    return ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY, serviceId,
+        nodeId);
+  }
+
+  public static InetSocketAddress getScmDataNodeBindAddress(
+      ConfigurationSource conf, String localScmServiceId, String nodeId) {
+    String bindHostKey = ConfUtils.addKeySuffixes(
+        OZONE_SCM_DATANODE_BIND_HOST_KEY,
+        localScmServiceId, nodeId
+    );
+    final Optional<String> host = getHostNameFromConfigKeys(conf, bindHostKey);
+    String addressKey = ConfUtils.addKeySuffixes(
+        OZONE_SCM_DATANODE_ADDRESS_KEY, localScmServiceId,
+        nodeId);
+    final OptionalInt port = getPortNumberFromConfigKeys(conf, addressKey);
+
+    return NetUtils.createSocketAddr(
+        host.orElse(OZONE_SCM_DATANODE_BIND_HOST_DEFAULT) + ":" +
+            port.orElse(conf.getInt(ConfUtils.addKeySuffixes(
+                OZONE_SCM_DATANODE_PORT_KEY, localScmServiceId, nodeId),
+                OZONE_SCM_DATANODE_PORT_DEFAULT)));
+  }
+
+  public static String getScmDataNodeBindAddressKey(
+      String serviceId, String nodeId) {
+    return ConfUtils.addKeySuffixes(
+        OZONE_SCM_DATANODE_ADDRESS_KEY,
+        serviceId, nodeId);
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
index 77fe841..bfc68c7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
@@ -24,8 +24,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.ozone.common.BlockGroup;
 
 /**
@@ -33,8 +31,7 @@
  *  Block APIs.
  *  Container is transparent to these APIs.
  */
-public interface BlockManager extends Closeable,
-    EventHandler<SafeModeStatus> {
+public interface BlockManager extends Closeable {
   /**
    * Allocates a new block for a given size.
    * @param size - Block Size
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 2c4592d..8fdebf0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -32,32 +32,30 @@
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmUtils;
 import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.util.StringUtils;
 
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE;
+import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.LOCAL_ID;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -70,17 +68,18 @@
   // Currently only user of the block service is Ozone, CBlock manages blocks
   // by itself and does not rely on the Block service offered by SCM.
 
+  private final StorageContainerManager scm;
   private final PipelineManager pipelineManager;
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
 
   private final long containerSize;
 
-  private final DeletedBlockLog deletedBlockLog;
+  private DeletedBlockLog deletedBlockLog;
   private final SCMBlockDeletingService blockDeletingService;
 
   private ObjectName mxBean;
-  private SafeModePrecheck safeModePrecheck;
-  private PipelineChoosePolicy pipelineChoosePolicy;
+  private final PipelineChoosePolicy pipelineChoosePolicy;
+  private final SequenceIdGenerator sequenceIdGen;
 
   /**
    * Constructor.
@@ -93,9 +92,11 @@
                           final StorageContainerManager scm)
       throws IOException {
     Objects.requireNonNull(scm, "SCM cannot be null");
+    this.scm = scm;
     this.pipelineManager = scm.getPipelineManager();
     this.containerManager = scm.getContainerManager();
     this.pipelineChoosePolicy = scm.getPipelineChoosePolicy();
+    this.sequenceIdGen = scm.getSequenceIdGen();
     this.containerSize = (long)conf.getStorageSize(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
@@ -104,8 +105,13 @@
     mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this);
 
     // SCM block deleting transaction log and deleting service.
-    deletedBlockLog = new DeletedBlockLogImpl(conf, scm.getContainerManager(),
-        scm.getScmMetadataStore());
+    deletedBlockLog = new DeletedBlockLogImplV2(conf,
+        scm.getContainerManager(),
+        scm.getScmHAManager().getRatisServer(),
+        scm.getScmMetadataStore().getDeletedBlocksTXTable(),
+        scm.getScmHAManager().getDBTransactionBuffer(),
+        scm.getScmContext(),
+        scm.getSequenceIdGen());
     Duration svcInterval = conf.getObject(
             ScmConfig.class).getBlockDeletionInterval();
     long serviceTimeout =
@@ -115,9 +121,8 @@
             TimeUnit.MILLISECONDS);
     blockDeletingService =
         new SCMBlockDeletingService(deletedBlockLog, containerManager,
-            scm.getScmNodeManager(), scm.getEventQueue(), svcInterval,
-            serviceTimeout, conf);
-    safeModePrecheck = new SafeModePrecheck(conf);
+            scm.getScmNodeManager(), scm.getEventQueue(), scm.getScmContext(),
+            scm.getSCMServiceManager(), svcInterval, serviceTimeout, conf);
   }
 
   /**
@@ -159,7 +164,10 @@
     if (LOG.isTraceEnabled()) {
       LOG.trace("Size : {} , type : {}, factor : {} ", size, type, factor);
     }
-    ScmUtils.preCheck(ScmOps.allocateBlock, safeModePrecheck);
+    if (scm.getScmContext().isInSafeMode()) {
+      throw new SCMException("SafeModePrecheck failed for allocateBlock",
+          SCMException.ResultCodes.SAFE_MODE_EXCEPTION);
+    }
     if (size < 0 || size > containerSize) {
       LOG.warn("Invalid block size requested : {}", size);
       throw new SCMException("Unsupported block size: " + size,
@@ -258,12 +266,12 @@
    * @param containerInfo - Container Info.
    * @return AllocatedBlock
    */
-  private AllocatedBlock newBlock(ContainerInfo containerInfo) {
+  private AllocatedBlock newBlock(ContainerInfo containerInfo)
+      throws NotLeaderException {
     try {
       final Pipeline pipeline = pipelineManager
           .getPipeline(containerInfo.getPipelineID());
-      // TODO : Revisit this local ID allocation when HA is added.
-      long localID = UniqueId.next();
+      long localID = sequenceIdGen.getNextId(LOCAL_ID);
       long containerID = containerInfo.getContainerID();
       AllocatedBlock.Builder abb =  new AllocatedBlock.Builder()
           .setContainerBlockID(new ContainerBlockID(containerID, localID))
@@ -294,8 +302,10 @@
   @Override
   public void deleteBlocks(List<BlockGroup> keyBlocksInfoList)
       throws IOException {
-    ScmUtils.preCheck(ScmOps.deleteBlock, safeModePrecheck);
-
+    if (scm.getScmContext().isInSafeMode()) {
+      throw new SCMException("SafeModePrecheck failed for deleteBlocks",
+          SCMException.ResultCodes.SAFE_MODE_EXCEPTION);
+    }
     Map<Long, List<Long>> containerBlocks = new HashMap<>();
     // TODO: track the block size info so that we can reclaim the container
     // TODO: used space when the block is deleted.
@@ -366,25 +376,12 @@
   }
 
   /**
-   * Returns status of scm safe mode determined by SAFE_MODE_STATUS event.
-   * */
-  public boolean isScmInSafeMode() {
-    return this.safeModePrecheck.isInSafeMode();
-  }
-
-  /**
    * Get class logger.
    * */
   public static Logger getLogger() {
     return LOG;
   }
 
-  @Override
-  public void onMessage(SafeModeStatus status,
-      EventPublisher publisher) {
-    this.safeModePrecheck.setInSafeMode(status.isInSafeMode());
-  }
-
   /**
    * This class uses system current time milliseconds to generate unique id.
    */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 9a5d74f..4b5ec0a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -22,6 +22,7 @@
     .DeleteBlockTransactionResult;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.utils.db.Table;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -81,17 +82,6 @@
       UUID dnID);
 
   /**
-   * Creates a block deletion transaction and adds that into the log.
-   *
-   * @param containerID - container ID.
-   * @param blocks - blocks that belong to the same container.
-   *
-   * @throws IOException
-   */
-  void addTransaction(long containerID, List<Long> blocks)
-      throws IOException;
-
-  /**
    * Creates block deletion transactions for a set of containers,
    * add into the log and persist them atomically. An object key
    * might be stored in multiple containers and multiple blocks,
@@ -115,4 +105,10 @@
    * @throws IOException
    */
   int getNumOfValidTransactions() throws IOException;
+
+  /**
+   * Reinitialize the delete log from the db.
+   * @param deletedBlocksTXTable delete transaction table
+   */
+  void reinitialize(Table<Long, DeletedBlocksTransaction> deletedBlocksTXTable);
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index b887ebc..28666d9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdds.scm.block;
 
 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 import java.util.Set;
@@ -39,9 +38,9 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -76,7 +75,7 @@
       DeletedBlocksTransaction.newBuilder().setContainerID(1).setCount(1);
 
   private final int maxRetry;
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
   private final SCMMetadataStore scmMetadataStore;
   private final Lock lock;
   // Maps txId to set of DNs which are successful in committing the transaction
@@ -90,7 +89,7 @@
 
 
   public DeletedBlockLogImpl(ConfigurationSource conf,
-                             ContainerManager containerManager,
+                             ContainerManagerV2 containerManager,
                              SCMMetadataStore scmMetadataStore)
       throws IOException {
     maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
@@ -258,7 +257,7 @@
           long txID = transactionResult.getTxID();
           // set of dns which have successfully committed transaction txId.
           dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
-          final ContainerID containerId = ContainerID.valueof(
+          final ContainerID containerId = ContainerID.valueOf(
               transactionResult.getContainerID());
           if (dnsWithCommittedTxn == null) {
             // Mostly likely it's a retried delete command response.
@@ -322,20 +321,6 @@
     return false;
   }
 
-  /**
-   * {@inheritDoc}
-   *
-   * @param containerID - container ID.
-   * @param blocks      - blocks that belong to the same container.
-   * @throws IOException
-   */
-  @Override
-  public void addTransaction(long containerID, List<Long> blocks)
-      throws IOException {
-    Map<Long, List<Long>> map = Collections.singletonMap(containerID, blocks);
-    addTransactions(map);
-  }
-
   @Override
   public int getNumOfValidTransactions() throws IOException {
     lock.lock();
@@ -357,6 +342,12 @@
     }
   }
 
+  @Override
+  public void reinitialize(
+      Table<Long, DeletedBlocksTransaction> deletedBlocksTXTable) {
+    throw new RuntimeException("Not supported operation.");
+  }
+
   /**
    * {@inheritDoc}
    *
@@ -394,7 +385,7 @@
       DatanodeDeletedBlockTransactions transactions) {
     try {
       Set<ContainerReplica> replicas = containerManager
-          .getContainerReplicas(ContainerID.valueof(tx.getContainerID()));
+          .getContainerReplicas(ContainerID.valueOf(tx.getContainerID()));
       for (ContainerReplica replica : replicas) {
         UUID dnID = replica.getDatanodeDetails().getUuid();
         Set<UUID> dnsWithTransactionCommitted =
@@ -427,7 +418,7 @@
         while (iter.hasNext() && numBlocksAdded < blockDeletionLimit) {
           Table.KeyValue<Long, DeletedBlocksTransaction> keyValue = iter.next();
           DeletedBlocksTransaction txn = keyValue.getValue();
-          final ContainerID id = ContainerID.valueof(txn.getContainerID());
+          final ContainerID id = ContainerID.valueOf(txn.getContainerID());
           try {
             if (txn.getCount() > -1 && txn.getCount() <= maxRetry
                 && !containerManager.getContainer(id).isOpen()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImplV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImplV2.java
new file mode 100644
index 0000000..fa20603
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImplV2.java
@@ -0,0 +1,431 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.block;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+import java.util.Set;
+import java.util.Map;
+import java.util.LinkedHashSet;
+import java.util.ArrayList;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+
+import com.google.common.collect.Lists;
+import static java.lang.Math.min;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.DEL_TXN_ID;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A implement class of {@link DeletedBlockLog}, and it uses
+ * K/V db to maintain block deletion transactions between scm and datanode.
+ * This is a very basic implementation, it simply scans the log and
+ * memorize the position that scanned by last time, and uses this to
+ * determine where the next scan starts. It has no notion about weight
+ * of each transaction so as long as transaction is still valid, they get
+ * equally same chance to be retrieved which only depends on the nature
+ * order of the transaction ID.
+ */
+public class DeletedBlockLogImplV2
+    implements DeletedBlockLog, EventHandler<DeleteBlockStatus> {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DeletedBlockLogImpl.class);
+
+  private final int maxRetry;
+  private final ContainerManagerV2 containerManager;
+  private final Lock lock;
+  // Maps txId to set of DNs which are successful in committing the transaction
+  private Map<Long, Set<UUID>> transactionToDNsCommitMap;
+  // Maps txId to its retry counts;
+  private Map<Long, Integer> transactionToRetryCountMap;
+  // The access to DeletedBlocksTXTable is protected by
+  // DeletedBlockLogStateManager.
+  private final DeletedBlockLogStateManager deletedBlockLogStateManager;
+  private final SCMContext scmContext;
+  private final SequenceIdGenerator sequenceIdGen;
+
+  public DeletedBlockLogImplV2(ConfigurationSource conf,
+      ContainerManagerV2 containerManager,
+      SCMRatisServer ratisServer,
+      Table<Long, DeletedBlocksTransaction> deletedBlocksTXTable,
+      DBTransactionBuffer dbTxBuffer,
+      SCMContext scmContext,
+      SequenceIdGenerator sequenceIdGen) {
+    maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
+        OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
+    this.containerManager = containerManager;
+    this.lock = new ReentrantLock();
+
+    // transactionToDNsCommitMap is updated only when
+    // transaction is added to the log and when it is removed.
+
+    // maps transaction to dns which have committed it.
+    transactionToDNsCommitMap = new ConcurrentHashMap<>();
+    transactionToRetryCountMap = new ConcurrentHashMap<>();
+    this.deletedBlockLogStateManager = DeletedBlockLogStateManagerImpl
+        .newBuilder()
+        .setConfiguration(conf)
+        .setDeletedBlocksTable(deletedBlocksTXTable)
+        .setRatisServer(ratisServer)
+        .setSCMDBTransactionBuffer(dbTxBuffer)
+        .build();
+    this.scmContext = scmContext;
+    this.sequenceIdGen = sequenceIdGen;
+  }
+
+  @Override
+  public List<DeletedBlocksTransaction> getFailedTransactions()
+      throws IOException {
+    lock.lock();
+    try {
+      final List<DeletedBlocksTransaction> failedTXs = Lists.newArrayList();
+      try (TableIterator<Long,
+          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
+               deletedBlockLogStateManager.getReadOnlyIterator()) {
+        while (iter.hasNext()) {
+          DeletedBlocksTransaction delTX = iter.next().getValue();
+          if (delTX.getCount() == -1) {
+            failedTXs.add(delTX);
+          }
+        }
+      }
+      return failedTXs;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * @param txIDs - transaction ID.
+   * @throws IOException
+   */
+  @Override
+  public void incrementCount(List<Long> txIDs) throws IOException {
+    lock.lock();
+    try {
+      ArrayList<Long> txIDsToUpdate = new ArrayList<>();
+      for (Long txID : txIDs) {
+        int currentCount =
+            transactionToRetryCountMap.getOrDefault(txID, 0);
+        if (currentCount > maxRetry) {
+          continue;
+        } else {
+          currentCount += 1;
+          if (currentCount > maxRetry) {
+            txIDsToUpdate.add(txID);
+          }
+          transactionToRetryCountMap.put(txID, currentCount);
+        }
+      }
+
+      if (!txIDsToUpdate.isEmpty()) {
+        deletedBlockLogStateManager
+            .increaseRetryCountOfTransactionInDB(txIDsToUpdate);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+
+  private DeletedBlocksTransaction constructNewTransaction(
+      long txID, long containerID, List<Long> blocks) {
+    return DeletedBlocksTransaction.newBuilder()
+        .setTxID(txID)
+        .setContainerID(containerID)
+        .addAllLocalID(blocks)
+        .setCount(0)
+        .build();
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * @param transactionResults - transaction IDs.
+   * @param dnID               - Id of Datanode which has acknowledged
+   *                           a delete block command.
+   * @throws IOException
+   */
+  @Override
+  public void commitTransactions(
+      List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
+    lock.lock();
+    try {
+      ArrayList<Long> txIDsToBeDeleted = new ArrayList<>();
+      Set<UUID> dnsWithCommittedTxn;
+      for (DeleteBlockTransactionResult transactionResult :
+          transactionResults) {
+        if (isTransactionFailed(transactionResult)) {
+          continue;
+        }
+        try {
+          long txID = transactionResult.getTxID();
+          // set of dns which have successfully committed transaction txId.
+          dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
+          final ContainerID containerId = ContainerID.valueOf(
+              transactionResult.getContainerID());
+          if (dnsWithCommittedTxn == null) {
+            // Mostly likely it's a retried delete command response.
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(
+                  "Transaction txId={} commit by dnId={} for containerID={}"
+                      + " failed. Corresponding entry not found.", txID, dnID,
+                  containerId);
+            }
+            continue;
+          }
+
+          dnsWithCommittedTxn.add(dnID);
+          final ContainerInfo container =
+              containerManager.getContainer(containerId);
+          final Set<ContainerReplica> replicas =
+              containerManager.getContainerReplicas(containerId);
+          // The delete entry can be safely removed from the log if all the
+          // corresponding nodes commit the txn. It is required to check that
+          // the nodes returned in the pipeline match the replication factor.
+          if (min(replicas.size(), dnsWithCommittedTxn.size())
+              >= container.getReplicationFactor().getNumber()) {
+            List<UUID> containerDns = replicas.stream()
+                .map(ContainerReplica::getDatanodeDetails)
+                .map(DatanodeDetails::getUuid)
+                .collect(Collectors.toList());
+            if (dnsWithCommittedTxn.containsAll(containerDns)) {
+              transactionToDNsCommitMap.remove(txID);
+              transactionToRetryCountMap.remove(txID);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Purging txId={} from block deletion log", txID);
+              }
+              txIDsToBeDeleted.add(txID);
+            }
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Datanode txId={} containerId={} committed by dnId={}",
+                txID, containerId, dnID);
+          }
+        } catch (IOException e) {
+          LOG.warn("Could not commit delete block transaction: " +
+              transactionResult.getTxID(), e);
+        }
+      }
+      try {
+        deletedBlockLogStateManager.removeTransactionsFromDB(txIDsToBeDeleted);
+      } catch (IOException e) {
+        LOG.warn("Could not commit delete block transactions: "
+            + txIDsToBeDeleted, e);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private boolean isTransactionFailed(DeleteBlockTransactionResult result) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "Got block deletion ACK from datanode, TXIDs={}, " + "success={}",
+          result.getTxID(), result.getSuccess());
+    }
+    if (!result.getSuccess()) {
+      LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
+          + "TX in next interval", result.getTxID());
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public int getNumOfValidTransactions() throws IOException {
+    lock.lock();
+    try {
+      final AtomicInteger num = new AtomicInteger(0);
+      try (TableIterator<Long,
+          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
+               deletedBlockLogStateManager.getReadOnlyIterator()) {
+        while (iter.hasNext()) {
+          DeletedBlocksTransaction delTX = iter.next().getValue();
+          if (delTX.getCount() > -1) {
+            num.incrementAndGet();
+          }
+        }
+      }
+      return num.get();
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public void reinitialize(
+      Table<Long, DeletedBlocksTransaction> deletedTable) {
+    // we don't need handle transactionToDNsCommitMap and
+    // deletedBlockLogStateManager, since they will be cleared
+    // when becoming leader.
+    deletedBlockLogStateManager.reinitialize(deletedTable);
+  }
+
+  /**
+   * Called in SCMStateMachine#notifyLeaderChanged when current SCM becomes
+   *  leader.
+   */
+  public void onBecomeLeader() {
+    transactionToDNsCommitMap.clear();
+    transactionToRetryCountMap.clear();
+  }
+
+  /**
+   * Called in SCMDBTransactionBuffer#flush when the cached deleting operations
+   * are flushed.
+   */
+  public void onFlush() {
+    deletedBlockLogStateManager.onFlush();
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * @param containerBlocksMap a map of containerBlocks.
+   * @throws IOException
+   */
+  @Override
+  public void addTransactions(Map<Long, List<Long>> containerBlocksMap)
+      throws IOException {
+    lock.lock();
+    try {
+      ArrayList<DeletedBlocksTransaction> txsToBeAdded = new ArrayList<>();
+      for (Map.Entry< Long, List< Long > > entry :
+          containerBlocksMap.entrySet()) {
+        long nextTXID = sequenceIdGen.getNextId(DEL_TXN_ID);
+        DeletedBlocksTransaction tx = constructNewTransaction(nextTXID,
+            entry.getKey(), entry.getValue());
+        txsToBeAdded.add(tx);
+      }
+
+      deletedBlockLogStateManager.addTransactionsToDB(txsToBeAdded);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+
+  private void getTransaction(DeletedBlocksTransaction tx,
+      DatanodeDeletedBlockTransactions transactions) {
+    try {
+      Set<ContainerReplica> replicas = containerManager
+          .getContainerReplicas(ContainerID.valueOf(tx.getContainerID()));
+      for (ContainerReplica replica : replicas) {
+        UUID dnID = replica.getDatanodeDetails().getUuid();
+        Set<UUID> dnsWithTransactionCommitted =
+            transactionToDNsCommitMap.get(tx.getTxID());
+        if (dnsWithTransactionCommitted == null || !dnsWithTransactionCommitted
+            .contains(dnID)) {
+          // Transaction need not be sent to dns which have
+          // already committed it
+          transactions.addTransactionToDN(dnID, tx);
+        }
+      }
+    } catch (IOException e) {
+      LOG.warn("Got container info error.", e);
+    }
+  }
+
+  @Override
+  public DatanodeDeletedBlockTransactions getTransactions(
+      int blockDeletionLimit) throws IOException {
+    lock.lock();
+    try {
+      DatanodeDeletedBlockTransactions transactions =
+          new DatanodeDeletedBlockTransactions();
+      try (TableIterator<Long,
+          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
+               deletedBlockLogStateManager.getReadOnlyIterator()) {
+        int numBlocksAdded = 0;
+        ArrayList<Long> txIDs = new ArrayList<>();
+        while (iter.hasNext() && numBlocksAdded < blockDeletionLimit) {
+          Table.KeyValue<Long, DeletedBlocksTransaction> keyValue = iter.next();
+          DeletedBlocksTransaction txn = keyValue.getValue();
+          final ContainerID id = ContainerID.valueOf(txn.getContainerID());
+          try {
+            if (txn.getCount() > -1 && txn.getCount() <= maxRetry
+                && !containerManager.getContainer(id).isOpen()) {
+              numBlocksAdded += txn.getLocalIDCount();
+              getTransaction(txn, transactions);
+              transactionToDNsCommitMap
+                  .putIfAbsent(txn.getTxID(), new LinkedHashSet<>());
+            }
+          } catch (ContainerNotFoundException ex) {
+            LOG.warn("Container: " + id + " was not found for the transaction: "
+                + txn);
+            txIDs.add(txn.getTxID());
+          }
+        }
+
+        deletedBlockLogStateManager.removeTransactionsFromDB(txIDs);
+      }
+      return transactions;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public void onMessage(
+      DeleteBlockStatus deleteBlockStatus, EventPublisher publisher) {
+    if (!scmContext.isLeader()) {
+      LOG.warn("Skip commit transactions since current SCM is not leader.");
+      return;
+    }
+
+    ContainerBlocksDeletionACKProto ackProto =
+        deleteBlockStatus.getCmdStatus().getBlockDeletionAck();
+    commitTransactions(ackProto.getResultsList(),
+        UUID.fromString(ackProto.getDnId()));
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java
new file mode 100644
index 0000000..d90e176
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManager.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.block;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+public interface DeletedBlockLogStateManager {
+  @Replicate
+  void addTransactionsToDB(ArrayList<DeletedBlocksTransaction> txs)
+      throws IOException;
+
+  @Replicate
+  void removeTransactionsFromDB(ArrayList<Long> txIDs)
+      throws IOException;
+
+  @Replicate
+  void increaseRetryCountOfTransactionInDB(ArrayList<Long> txIDs)
+      throws IOException;
+
+  TableIterator<Long,
+      KeyValue<Long, DeletedBlocksTransaction>> getReadOnlyIterator();
+
+  void onFlush();
+
+  void reinitialize(Table<Long, DeletedBlocksTransaction> deletedBlocksTXTable);
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
new file mode 100644
index 0000000..d2c55a0
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
@@ -0,0 +1,281 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.block;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.TypedTable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+import java.util.ArrayList;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
+
+public class DeletedBlockLogStateManagerImpl
+    implements DeletedBlockLogStateManager {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(DeletedBlockLogStateManagerImpl.class);
+
+  private Table<Long, DeletedBlocksTransaction> deletedTable;
+  private final DBTransactionBuffer transactionBuffer;
+  private final int maxRetry;
+  private final Set<Long> deletingTxIDs;
+  private final Set<Long> skippingRetryTxIDs;
+
+  public DeletedBlockLogStateManagerImpl(
+      ConfigurationSource conf,
+      Table<Long, DeletedBlocksTransaction> deletedTable,
+      DBTransactionBuffer txBuffer) {
+    this.maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
+        OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
+    this.deletedTable = deletedTable;
+    this.transactionBuffer = txBuffer;
+    final boolean isRatisEnabled = SCMHAUtils.isSCMHAEnabled(conf);
+    this.deletingTxIDs = isRatisEnabled ? ConcurrentHashMap.newKeySet() : null;
+    this.skippingRetryTxIDs =
+        isRatisEnabled ? ConcurrentHashMap.newKeySet() : null;
+  }
+
+  public TableIterator<Long, TypedTable.KeyValue<Long,
+      DeletedBlocksTransaction>> getReadOnlyIterator() {
+    return new TableIterator<Long, TypedTable.KeyValue<Long,
+        DeletedBlocksTransaction>>() {
+
+      private TableIterator<Long,
+          ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
+          deletedTable.iterator();
+      private TypedTable.KeyValue<Long, DeletedBlocksTransaction> nextTx;
+
+      {
+        findNext();
+      }
+
+      private void findNext() {
+        while (iter.hasNext()) {
+          TypedTable.KeyValue<Long, DeletedBlocksTransaction> next = iter
+              .next();
+          long txID;
+          try {
+            txID = next.getKey();
+          } catch (IOException e) {
+            throw new IllegalStateException("");
+          }
+
+          if ((deletingTxIDs == null || !deletingTxIDs.contains(txID)) && (
+              skippingRetryTxIDs == null || !skippingRetryTxIDs
+                  .contains(txID))) {
+            nextTx = next;
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("DeletedBlocksTransaction matching txID:{}",
+                  txID);
+            }
+            return;
+          }
+        }
+        nextTx = null;
+      }
+
+      @Override
+      public boolean hasNext() {
+        return nextTx != null;
+      }
+
+      @Override
+      public TypedTable.KeyValue<Long, DeletedBlocksTransaction> next() {
+        if (nextTx == null) {
+          throw new NoSuchElementException("DeletedBlocksTransaction " +
+              "Iterator reached end");
+        }
+        TypedTable.KeyValue<Long, DeletedBlocksTransaction> returnTx = nextTx;
+        findNext();
+        return returnTx;
+      }
+
+      @Override
+      public void close() throws IOException {
+        iter.close();
+      }
+
+      @Override
+      public void seekToFirst() {
+        throw new UnsupportedOperationException("seekToFirst");
+      }
+
+      @Override
+      public void seekToLast() {
+        throw new UnsupportedOperationException("seekToLast");
+      }
+
+      @Override
+      public TypedTable.KeyValue<Long, DeletedBlocksTransaction> seek(
+          Long key) throws IOException {
+        throw new UnsupportedOperationException("seek");
+      }
+
+      @Override
+      public Long key() throws IOException {
+        throw new UnsupportedOperationException("key");
+      }
+
+      @Override
+      public TypedTable.KeyValue<Long, DeletedBlocksTransaction> value() {
+        throw new UnsupportedOperationException("value");
+      }
+
+      @Override
+      public void removeFromDB() throws IOException {
+        throw new UnsupportedOperationException("read-only");
+      }
+    };
+  }
+
+  @Override
+  public void addTransactionsToDB(ArrayList<DeletedBlocksTransaction> txs)
+      throws IOException {
+    for (DeletedBlocksTransaction tx : txs) {
+      transactionBuffer.addToBuffer(deletedTable, tx.getTxID(), tx);
+    }
+  }
+
+  @Override
+  public void removeTransactionsFromDB(ArrayList<Long> txIDs)
+      throws IOException {
+    if (deletingTxIDs != null) {
+      deletingTxIDs.addAll(txIDs);
+    }
+    for (Long txID : txIDs) {
+      transactionBuffer.removeFromBuffer(deletedTable, txID);
+    }
+  }
+
+  @Override
+  public void increaseRetryCountOfTransactionInDB(
+      ArrayList<Long> txIDs) throws IOException {
+    for (Long txID : txIDs) {
+      DeletedBlocksTransaction block =
+          deletedTable.get(txID);
+      if (block == null) {
+        if (LOG.isDebugEnabled()) {
+          // This can occur due to race condition between retry and old
+          // service task where old task removes the transaction and the new
+          // task is resending
+          LOG.debug("Deleted TXID {} not found.", txID);
+        }
+        continue;
+      }
+      // if the retry time exceeds the maxRetry value
+      // then set the retry value to -1, stop retrying, admins can
+      // analyze those blocks and purge them manually by SCMCli.
+      DeletedBlocksTransaction.Builder builder = block.toBuilder().setCount(-1);
+      transactionBuffer.addToBuffer(deletedTable, txID, builder.build());
+      if (skippingRetryTxIDs != null) {
+        skippingRetryTxIDs.add(txID);
+      }
+    }
+  }
+
+  public void onFlush() {
+    // onFlush() can be invoked only when ratis is enabled.
+    Preconditions.checkNotNull(deletingTxIDs);
+    Preconditions.checkNotNull(skippingRetryTxIDs);
+    deletingTxIDs.clear();
+    skippingRetryTxIDs.clear();
+  }
+
+  @Override
+  public void reinitialize(
+      Table<Long, DeletedBlocksTransaction> deletedBlocksTXTable) {
+    // Before Reinitialization, flush will be called from Ratis StateMachine.
+    // Just the DeletedDb will be loaded here.
+
+    // We don't need to handle transactionBuffer, deletingTxIDs
+    // and skippingRetryTxIDs here, since onFlush() will be called
+    // before reinitialization. Just update deletedTable here.
+    Preconditions.checkArgument(deletingTxIDs.isEmpty());
+    this.deletedTable = deletedBlocksTXTable;
+  }
+
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for ContainerStateManager.
+   */
+  public static class Builder {
+    private ConfigurationSource conf;
+    private SCMRatisServer scmRatisServer;
+    private Table<Long, DeletedBlocksTransaction> table;
+    private DBTransactionBuffer transactionBuffer;
+
+    public Builder setConfiguration(final ConfigurationSource config) {
+      conf = config;
+      return this;
+    }
+
+    public Builder setRatisServer(final SCMRatisServer ratisServer) {
+      scmRatisServer = ratisServer;
+      return this;
+    }
+
+    public Builder setDeletedBlocksTable(
+        final Table<Long, DeletedBlocksTransaction> deletedBlocksTable) {
+      table = deletedBlocksTable;
+      return this;
+    }
+
+    public Builder setSCMDBTransactionBuffer(DBTransactionBuffer buffer) {
+      this.transactionBuffer = buffer;
+      return this;
+    }
+
+    public DeletedBlockLogStateManager build() {
+      Preconditions.checkNotNull(conf);
+      Preconditions.checkNotNull(table);
+
+      final DeletedBlockLogStateManager impl =
+          new DeletedBlockLogStateManagerImpl(conf, table, transactionBuffer);
+
+      final SCMHAInvocationHandler invocationHandler =
+          new SCMHAInvocationHandler(SCMRatisProtocol.RequestType.BLOCK,
+              impl, scmRatisServer);
+
+      return (DeletedBlockLogStateManager) Proxy.newProxyInstance(
+          SCMHAInvocationHandler.class.getClassLoader(),
+          new Class<?>[]{DeletedBlockLogStateManager.class},
+          invocationHandler);
+    }
+
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index bc18078..edf6ca8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -18,17 +18,24 @@
 
 import java.io.IOException;
 import java.time.Duration;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMService;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -38,10 +45,12 @@
 import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,22 +61,32 @@
  * SCM HB thread polls cached commands and sends them to datanode for physical
  * processing.
  */
-public class SCMBlockDeletingService extends BackgroundService {
+public class SCMBlockDeletingService extends BackgroundService
+    implements SCMService {
 
   public static final Logger LOG =
       LoggerFactory.getLogger(SCMBlockDeletingService.class);
 
   private static final int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 1;
   private final DeletedBlockLog deletedBlockLog;
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
   private final NodeManager nodeManager;
   private final EventPublisher eventPublisher;
+  private final SCMContext scmContext;
 
   private int blockDeleteLimitSize;
 
+  /**
+   * SCMService related variables.
+   */
+  private final Lock serviceLock = new ReentrantLock();
+  private ServiceStatus serviceStatus = ServiceStatus.PAUSING;
+
+  @SuppressWarnings("parameternumber")
   public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
-      ContainerManager containerManager, NodeManager nodeManager,
-      EventPublisher eventPublisher, Duration interval, long serviceTimeout,
+      ContainerManagerV2 containerManager, NodeManager nodeManager,
+      EventPublisher eventPublisher, SCMContext scmContext,
+      SCMServiceManager serviceManager, Duration interval, long serviceTimeout,
       ConfigurationSource conf) {
     super("SCMBlockDeletingService", interval.toMillis(), TimeUnit.MILLISECONDS,
         BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
@@ -75,11 +94,15 @@
     this.containerManager = containerManager;
     this.nodeManager = nodeManager;
     this.eventPublisher = eventPublisher;
+    this.scmContext = scmContext;
 
     blockDeleteLimitSize =
         conf.getObject(ScmConfig.class).getBlockDeletionLimit();
     Preconditions.checkArgument(blockDeleteLimitSize > 0,
         "Block deletion limit should be " + "positive.");
+
+    // register SCMBlockDeletingService to SCMServiceManager
+    serviceManager.register(this);
   }
 
   @Override
@@ -113,6 +136,10 @@
 
     @Override
     public EmptyTaskResult call() throws Exception {
+      if (!shouldRun()) {
+        return EmptyTaskResult.newResult();
+      }
+
       long startTime = Time.monotonicNow();
       // Scan SCM DB in HB interval and collect a throttled list of
       // to delete blocks.
@@ -144,9 +171,10 @@
               // We should stop caching new commands if num of un-processed
               // command is bigger than a limit, e.g 50. In case datanode goes
               // offline for sometime, the cached commands be flooded.
+              SCMCommand<?> command = new DeleteBlocksCommand(dnTXs);
+              command.setTerm(scmContext.getTermOfLeader());
               eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
-                  new CommandForDatanode<>(dnId,
-                      new DeleteBlocksCommand(dnTXs)));
+                  new CommandForDatanode<>(dnId, command));
               if (LOG.isDebugEnabled()) {
                 LOG.debug(
                     "Added delete block command for datanode {} in the queue,"
@@ -157,13 +185,20 @@
               }
             }
           }
-
-          containerManager.updateDeleteTransactionId(containerIdToMaxTxnId);
+          // TODO: Fix ME!!!
+          Map<ContainerID, Long> transactionMap = new HashMap<>();
+          for (Map.Entry<Long, Long> tx : containerIdToMaxTxnId.entrySet()) {
+            transactionMap.put(ContainerID.valueOf(tx.getKey()), tx.getValue());
+          }
+          containerManager.updateDeleteTransactionId(transactionMap);
           LOG.info("Totally added {} blocks to be deleted for"
                   + " {} datanodes, task elapsed time: {}ms",
               transactions.getBlocksDeleted(),
               transactions.getDatanodeTransactionMap().size(),
               Time.monotonicNow() - startTime);
+        } catch (NotLeaderException nle) {
+          LOG.warn("Skip current run, since not leader any more.", nle);
+          return EmptyTaskResult.newResult();
         } catch (IOException e) {
           // We may tolerate a number of failures for sometime
           // but if it continues to fail, at some point we need to raise
@@ -184,4 +219,38 @@
   public void setBlockDeleteTXNum(int numTXs) {
     blockDeleteLimitSize = numTXs;
   }
+
+  @Override
+  public void notifyStatusChanged() {
+    serviceLock.lock();
+    try {
+      if (scmContext.isLeader()) {
+        serviceStatus = ServiceStatus.RUNNING;
+      } else {
+        serviceStatus = ServiceStatus.PAUSING;
+      }
+    } finally {
+      serviceLock.unlock();
+    }
+  }
+
+  @Override
+  public boolean shouldRun() {
+    serviceLock.lock();
+    try {
+      return serviceStatus == ServiceStatus.RUNNING;
+    } finally {
+      serviceLock.unlock();
+    }
+  }
+
+  @Override
+  public String getServiceName() {
+    return SCMBlockDeletingService.class.getSimpleName();
+  }
+
+  @Override
+  public void stop() {
+    throw new RuntimeException("Not supported operation.");
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index 1b190a2..d8d31ae 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -27,9 +27,13 @@
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 
 import java.io.IOException;
@@ -44,7 +48,8 @@
  */
 public class AbstractContainerReportHandler {
 
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
+  private final SCMContext scmContext;
   private final Logger logger;
 
   /**
@@ -54,11 +59,14 @@
    * @param containerManager ContainerManager
    * @param logger Logger to be used for logging
    */
-  AbstractContainerReportHandler(final ContainerManager containerManager,
+  AbstractContainerReportHandler(final ContainerManagerV2 containerManager,
+                                 final SCMContext scmContext,
                                  final Logger logger) {
     Preconditions.checkNotNull(containerManager);
+    Preconditions.checkNotNull(scmContext);
     Preconditions.checkNotNull(logger);
     this.containerManager = containerManager;
+    this.scmContext = scmContext;
     this.logger = logger;
   }
 
@@ -73,9 +81,9 @@
    */
   protected void processContainerReplica(final DatanodeDetails datanodeDetails,
       final ContainerReplicaProto replicaProto, final EventPublisher publisher)
-      throws IOException {
+      throws IOException, InvalidStateTransitionException {
     final ContainerID containerId = ContainerID
-        .valueof(replicaProto.getContainerID());
+        .valueOf(replicaProto.getContainerID());
 
     if (logger.isDebugEnabled()) {
       logger.debug("Processing replica of container {} from datanode {}",
@@ -166,7 +174,7 @@
                                     final ContainerID containerId,
                                     final ContainerReplicaProto replica,
                                     final EventPublisher publisher)
-      throws IOException {
+      throws IOException, InvalidStateTransitionException {
 
     final ContainerInfo container = containerManager
         .getContainer(containerId);
@@ -310,17 +318,23 @@
    * Return ContainerManager.
    * @return {@link ContainerManager}
    */
-  protected ContainerManager getContainerManager() {
+  protected ContainerManagerV2 getContainerManager() {
     return containerManager;
   }
 
   protected void deleteReplica(ContainerID containerID, DatanodeDetails dn,
       EventPublisher publisher, String reason) {
-    final DeleteContainerCommand deleteCommand =
-        new DeleteContainerCommand(containerID.getId(), true);
-    final CommandForDatanode datanodeCommand = new CommandForDatanode<>(
-        dn.getUuid(), deleteCommand);
-    publisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand);
+    SCMCommand<?> command = new DeleteContainerCommand(
+        containerID.getId(), true);
+    try {
+      command.setTerm(scmContext.getTermOfLeader());
+    } catch (NotLeaderException nle) {
+      logger.warn("Skip sending delete container command," +
+          " since not leader SCM", nle);
+      return;
+    }
+    publisher.fireEvent(SCMEvents.DATANODE_COMMAND,
+        new CommandForDatanode<>(dn.getUuid(), command));
     logger.info("Sending delete container command for " + reason +
         " container {} to datanode {}", containerID.getId(), dn);
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index fd73711..449252c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -23,12 +23,16 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,12 +52,15 @@
       LoggerFactory.getLogger(CloseContainerEventHandler.class);
 
   private final PipelineManager pipelineManager;
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
+  private final SCMContext scmContext;
 
   public CloseContainerEventHandler(final PipelineManager pipelineManager,
-      final ContainerManager containerManager) {
+                                    final ContainerManagerV2 containerManager,
+                                    final SCMContext scmContext) {
     this.pipelineManager = pipelineManager;
     this.containerManager = containerManager;
+    this.scmContext = scmContext;
   }
 
   @Override
@@ -72,20 +79,22 @@
           .getContainer(containerID);
       // Send close command to datanodes, if the container is in CLOSING state
       if (container.getState() == LifeCycleState.CLOSING) {
+        SCMCommand<?> command = new CloseContainerCommand(
+            containerID.getId(), container.getPipelineID());
+        command.setTerm(scmContext.getTermOfLeader());
 
-        final CloseContainerCommand closeContainerCommand =
-            new CloseContainerCommand(
-                containerID.getId(), container.getPipelineID());
-
-        getNodes(container).forEach(node -> publisher.fireEvent(
-            DATANODE_COMMAND,
-            new CommandForDatanode<>(node.getUuid(), closeContainerCommand)));
+        getNodes(container).forEach(node ->
+            publisher.fireEvent(DATANODE_COMMAND,
+                new CommandForDatanode<>(node.getUuid(), command)));
       } else {
         LOG.warn("Cannot close container {}, which is in {} state.",
             containerID, container.getState());
       }
 
-    } catch (IOException ex) {
+    } catch (NotLeaderException nle) {
+      LOG.warn("Skip sending close container command,"
+          + " since current SCM is not leader.", nle);
+    } catch (IOException | InvalidStateTransitionException ex) {
       LOG.error("Failed to close the container {}.", containerID, ex);
     }
   }
@@ -98,7 +107,7 @@
    * @throws ContainerNotFoundException
    */
   private List<DatanodeDetails> getNodes(final ContainerInfo container)
-      throws ContainerNotFoundException {
+      throws ContainerNotFoundException, NotLeaderException {
     try {
       return pipelineManager.getPipeline(container.getPipelineID()).getNodes();
     } catch (PipelineNotFoundException ex) {
@@ -109,5 +118,4 @@
           .collect(Collectors.toList());
     }
   }
-
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
index e79f268..3d53e29 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
@@ -45,7 +45,7 @@
     DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails();
     for (ContainerAction action : containerReportFromDatanode.getReport()
         .getContainerActionsList()) {
-      ContainerID containerId = ContainerID.valueof(action.getContainerID());
+      ContainerID containerId = ContainerID.valueOf(action.getContainerID());
       switch (action.getAction()) {
       case CLOSE:
         if (LOG.isDebugEnabled()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
new file mode 100644
index 0000000..4609874
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -0,0 +1,427 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.CONTAINER_ID;
+
+/**
+ * TODO: Add javadoc.
+ */
+public class ContainerManagerImpl implements ContainerManagerV2 {
+
+  /*
+   * TODO: Introduce container level locks.
+   */
+
+  /**
+   *
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ContainerManagerImpl.class);
+
+  /**
+   *
+   */
+  // Limit the number of on-going ratis operations.
+  private final Lock lock;
+
+  /**
+   *
+   */
+  private final PipelineManager pipelineManager;
+
+  /**
+   *
+   */
+  private final ContainerStateManagerV2 containerStateManager;
+
+  private final SCMHAManager haManager;
+  private final SequenceIdGenerator sequenceIdGen;
+
+  // TODO: Revisit this.
+  // Metrics related to operations should be moved to ProtocolServer
+  private final SCMContainerManagerMetrics scmContainerManagerMetrics;
+
+  private final int numContainerPerVolume;
+  private final Random random = new Random();
+
+  /**
+   *
+   */
+  public ContainerManagerImpl(
+      final Configuration conf,
+      final SCMHAManager scmHaManager,
+      final SequenceIdGenerator sequenceIdGen,
+      final PipelineManager pipelineManager,
+      final Table<ContainerID, ContainerInfo> containerStore)
+      throws IOException {
+    // Introduce builder for this class?
+    this.lock = new ReentrantLock();
+    this.pipelineManager = pipelineManager;
+    this.haManager = scmHaManager;
+    this.sequenceIdGen = sequenceIdGen;
+    this.containerStateManager = ContainerStateManagerImpl.newBuilder()
+        .setConfiguration(conf)
+        .setPipelineManager(pipelineManager)
+        .setRatisServer(scmHaManager.getRatisServer())
+        .setContainerStore(containerStore)
+        .setSCMDBTransactionBuffer(scmHaManager.getDBTransactionBuffer())
+        .build();
+
+    this.numContainerPerVolume = conf
+        .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
+            ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
+
+    this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create();
+  }
+
+  @Override
+  public void reinitialize(Table<ContainerID, ContainerInfo> containerStore)
+      throws IOException {
+    lock.lock();
+    try {
+      containerStateManager.reinitialize(containerStore);
+    } catch (IOException ioe) {
+      LOG.error("Failed to reinitialize containerManager", ioe);
+      throw ioe;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public ContainerInfo getContainer(final ContainerID id)
+      throws ContainerNotFoundException {
+    return Optional.ofNullable(containerStateManager
+        .getContainer(id.getProtobuf()))
+        .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
+  }
+
+  @Override
+  public List<ContainerInfo> getContainers(final ContainerID startID,
+                                           final int count) {
+    scmContainerManagerMetrics.incNumListContainersOps();
+    // TODO: Remove the null check, startID should not be null. Fix the unit
+    //  test before removing the check.
+    final long start = startID == null ? 0 : startID.getId();
+    final List<ContainerID> containersIds =
+        new ArrayList<>(containerStateManager.getContainerIDs());
+    Collections.sort(containersIds);
+    return containersIds.stream()
+        .filter(id -> id.getId() >= start).limit(count)
+        .map(ContainerID::getProtobuf)
+        .map(containerStateManager::getContainer)
+        .collect(Collectors.toList());
+  }
+
+  @Override
+  public List<ContainerInfo> getContainers(final LifeCycleState state) {
+    return containerStateManager.getContainerIDs(state).stream()
+        .map(ContainerID::getProtobuf)
+        .map(containerStateManager::getContainer)
+        .filter(Objects::nonNull).collect(Collectors.toList());
+  }
+
+  @Override
+  public ContainerInfo allocateContainer(final ReplicationType type,
+      final ReplicationFactor replicationFactor, final String owner)
+      throws IOException {
+    lock.lock();
+    try {
+      final List<Pipeline> pipelines = pipelineManager
+          .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN);
+
+      final Pipeline pipeline;
+      if (pipelines.isEmpty()) {
+        try {
+          pipeline = pipelineManager.createPipeline(type, replicationFactor);
+          pipelineManager.waitPipelineReady(pipeline.getId(), 0);
+        } catch (IOException e) {
+          scmContainerManagerMetrics.incNumFailureCreateContainers();
+          throw new IOException("Could not allocate container. Cannot get any" +
+              " matching pipeline for Type:" + type + ", Factor:" +
+              replicationFactor + ", State:PipelineState.OPEN", e);
+        }
+      } else {
+        pipeline = pipelines.get(random.nextInt(pipelines.size()));
+      }
+      final ContainerInfo containerInfo = allocateContainer(pipeline, owner);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("New container allocated: {}", containerInfo);
+      }
+      return containerInfo;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private ContainerInfo allocateContainer(final Pipeline pipeline,
+                                          final String owner)
+      throws IOException {
+    final long uniqueId = sequenceIdGen.getNextId(CONTAINER_ID);
+    Preconditions.checkState(uniqueId > 0,
+        "Cannot allocate container, negative container id" +
+            " generated. %s.", uniqueId);
+    final ContainerID containerID = ContainerID.valueOf(uniqueId);
+    final ContainerInfoProto containerInfo = ContainerInfoProto.newBuilder()
+        .setState(LifeCycleState.OPEN)
+        .setPipelineID(pipeline.getId().getProtobuf())
+        .setUsedBytes(0)
+        .setNumberOfKeys(0)
+        .setStateEnterTime(Time.now())
+        .setOwner(owner)
+        .setContainerID(containerID.getId())
+        .setDeleteTransactionId(0)
+        .setReplicationFactor(pipeline.getFactor())
+        .setReplicationType(pipeline.getType())
+        .build();
+    containerStateManager.addContainer(containerInfo);
+    scmContainerManagerMetrics.incNumSuccessfulCreateContainers();
+    return containerStateManager.getContainer(containerID.getProtobuf());
+  }
+
+  @Override
+  public void updateContainerState(final ContainerID id,
+                                   final LifeCycleEvent event)
+      throws IOException, InvalidStateTransitionException {
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    lock.lock();
+    try {
+      if (containerExist(cid)) {
+        containerStateManager.updateContainerState(cid, event);
+      } else {
+        throwContainerNotFoundException(cid);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public Set<ContainerReplica> getContainerReplicas(final ContainerID id)
+      throws ContainerNotFoundException {
+    return Optional.ofNullable(containerStateManager
+        .getContainerReplicas(id.getProtobuf()))
+        .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
+  }
+
+  @Override
+  public void updateContainerReplica(final ContainerID id,
+                                     final ContainerReplica replica)
+      throws ContainerNotFoundException {
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    if (containerExist(cid)) {
+      containerStateManager.updateContainerReplica(cid, replica);
+    } else {
+      throwContainerNotFoundException(cid);
+    }
+  }
+
+  @Override
+  public void removeContainerReplica(final ContainerID id,
+                                     final ContainerReplica replica)
+      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    if (containerExist(cid)) {
+      containerStateManager.removeContainerReplica(cid, replica);
+    } else {
+      throwContainerNotFoundException(cid);
+    }
+  }
+
+  @Override
+  public void updateDeleteTransactionId(
+      final Map<ContainerID, Long> deleteTransactionMap) throws IOException {
+    containerStateManager.updateDeleteTransactionId(deleteTransactionMap);
+  }
+
+  @Override
+  public ContainerInfo getMatchingContainer(final long size, final String owner,
+      final Pipeline pipeline, final Set<ContainerID> excludedContainerIDs) {
+    NavigableSet<ContainerID> containerIDs;
+    ContainerInfo containerInfo;
+    try {
+      synchronized (pipeline.getId()) {
+        containerIDs = getContainersForOwner(pipeline, owner);
+        if (containerIDs.size() < getOpenContainerCountPerPipeline(pipeline)) {
+          allocateContainer(pipeline, owner);
+          containerIDs = getContainersForOwner(pipeline, owner);
+        }
+        containerIDs.removeAll(excludedContainerIDs);
+        containerInfo = containerStateManager.getMatchingContainer(
+            size, owner, pipeline.getId(), containerIDs);
+        if (containerInfo == null) {
+          containerInfo = allocateContainer(pipeline, owner);
+        }
+        return containerInfo;
+      }
+    } catch (Exception e) {
+      LOG.warn("Container allocation failed on pipeline={}", pipeline, e);
+      return null;
+    }
+  }
+
+  private int getOpenContainerCountPerPipeline(Pipeline pipeline) {
+    int minContainerCountPerDn = numContainerPerVolume *
+        pipelineManager.minHealthyVolumeNum(pipeline);
+    int minPipelineCountPerDn = pipelineManager.minPipelineLimit(pipeline);
+    return (int) Math.ceil(
+        ((double) minContainerCountPerDn / minPipelineCountPerDn));
+  }
+
+  /**
+   * Returns the container ID's matching with specified owner.
+   * @param pipeline
+   * @param owner
+   * @return NavigableSet<ContainerID>
+   */
+  private NavigableSet<ContainerID> getContainersForOwner(
+      Pipeline pipeline, String owner) throws IOException {
+    NavigableSet<ContainerID> containerIDs =
+        pipelineManager.getContainersInPipeline(pipeline.getId());
+    Iterator<ContainerID> containerIDIterator = containerIDs.iterator();
+    while (containerIDIterator.hasNext()) {
+      ContainerID cid = containerIDIterator.next();
+      try {
+        if (!getContainer(cid).getOwner().equals(owner)) {
+          containerIDIterator.remove();
+        }
+      } catch (ContainerNotFoundException e) {
+        LOG.error("Could not find container info for container {}", cid, e);
+        containerIDIterator.remove();
+      }
+    }
+    return containerIDs;
+  }
+
+  @Override
+  public void notifyContainerReportProcessing(final boolean isFullReport,
+                                              final boolean success) {
+    if (isFullReport) {
+      if (success) {
+        scmContainerManagerMetrics.incNumContainerReportsProcessedSuccessful();
+      } else {
+        scmContainerManagerMetrics.incNumContainerReportsProcessedFailed();
+      }
+    } else {
+      if (success) {
+        scmContainerManagerMetrics.incNumICRReportsProcessedSuccessful();
+      } else {
+        scmContainerManagerMetrics.incNumICRReportsProcessedFailed();
+      }
+    }
+  }
+
+  @Override
+  public void deleteContainer(final ContainerID id)
+      throws IOException {
+    final HddsProtos.ContainerID cid = id.getProtobuf();
+    lock.lock();
+    try {
+      if (containerExist(cid)) {
+        containerStateManager.removeContainer(cid);
+        scmContainerManagerMetrics.incNumSuccessfulDeleteContainers();
+      } else {
+        scmContainerManagerMetrics.incNumFailureDeleteContainers();
+        throwContainerNotFoundException(cid);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Deprecated
+  private void checkIfContainerExist(final HddsProtos.ContainerID id)
+      throws ContainerNotFoundException {
+    if (!containerStateManager.contains(id)) {
+      throw new ContainerNotFoundException("Container with id #" +
+          id.getId() + " not found.");
+    }
+  }
+
+  @Override
+  public boolean containerExist(final ContainerID id) {
+    return containerExist(id.getProtobuf());
+  }
+
+  private boolean containerExist(final HddsProtos.ContainerID id) {
+    return containerStateManager.contains(id);
+  }
+
+  private void throwContainerNotFoundException(final HddsProtos.ContainerID id)
+      throws ContainerNotFoundException {
+    throw new ContainerNotFoundException("Container with id #" +
+        id.getId() + " not found.");
+  }
+
+  @Override
+  public void close() throws IOException {
+    containerStateManager.close();
+  }
+
+  // Remove this after fixing Recon
+  @Deprecated
+  protected ContainerStateManagerV2 getContainerStateManager() {
+    return containerStateManager;
+  }
+
+  @VisibleForTesting
+  public SCMHAManager getSCMHAManager() {
+    return haManager;
+  }
+
+  public Set<ContainerID> getContainerIDs() {
+    return containerStateManager.getContainerIDs();
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
new file mode 100644
index 0000000..6f0233d
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
+
+/**
+ * TODO: Add extensive javadoc.
+ *
+ * ContainerManager class contains the mapping from a name to a pipeline
+ * mapping. This is used by SCM when allocating new locations and when
+ * looking up a key.
+ */
+public interface ContainerManagerV2 extends Closeable {
+  // TODO: Rename this to ContainerManager
+
+  /**
+   * Reinitialize the containerManager with the updated container store.
+   * @param containerStore Container Table
+   * @throws IOException
+   */
+  void reinitialize(Table<ContainerID, ContainerInfo> containerStore)
+      throws IOException;
+
+  /**
+   * Returns the ContainerInfo from the container ID.
+   *
+   */
+  ContainerInfo getContainer(ContainerID containerID)
+      throws ContainerNotFoundException;
+
+
+  default List<ContainerInfo> getContainers() {
+    return getContainers(ContainerID.valueOf(0), Integer.MAX_VALUE);
+  }
+  /**
+   * Returns containers under certain conditions.
+   * Search container IDs from start ID(exclusive),
+   * The max size of the searching range cannot exceed the
+   * value of count.
+   *
+   * @param startID start containerID, >=0,
+   * start searching at the head if 0.
+   * @param count count must be >= 0
+   *              Usually the count will be replace with a very big
+   *              value instead of being unlimited in case the db is very big.
+   *
+   * @return a list of container.
+   */
+  List<ContainerInfo> getContainers(ContainerID startID, int count);
+
+
+  /**
+   * Returns all the containers which are in the specified state.
+   *
+   * @return List of ContainerInfo
+   */
+  List<ContainerInfo> getContainers(LifeCycleState state);
+
+  /**
+   * Returns true if the container exist, false otherwise.
+   * @param id Container ID
+   * @return true if container exist, else false
+   */
+  boolean containerExist(ContainerID id);
+
+  /**
+   * Allocates a new container for a given keyName and replication factor.
+   *
+   * @param replicationFactor - replication factor of the container.
+   * @param owner
+   * @return - ContainerInfo.
+   * @throws IOException
+   */
+  ContainerInfo allocateContainer(ReplicationType type,
+                                  ReplicationFactor replicationFactor,
+                                  String owner) throws IOException;
+
+  /**
+   * Update container state.
+   * @param containerID - Container ID
+   * @param event - container life cycle event
+   * @throws IOException
+   * @throws InvalidStateTransitionException
+   */
+  void updateContainerState(ContainerID containerID,
+                            LifeCycleEvent event)
+      throws IOException, InvalidStateTransitionException;
+
+  /**
+   * Returns the latest list of replicas for given containerId.
+   *
+   * @param containerID Container ID
+   * @return Set of ContainerReplica
+   */
+  Set<ContainerReplica> getContainerReplicas(ContainerID containerID)
+      throws ContainerNotFoundException;
+
+  /**
+   * Adds a container Replica for the given Container.
+   *
+   * @param containerID Container ID
+   * @param replica ContainerReplica
+   */
+  void updateContainerReplica(ContainerID containerID, ContainerReplica replica)
+      throws ContainerNotFoundException;
+
+  /**
+   * Remove a container Replica form a given Container.
+   *
+   * @param containerID Container ID
+   * @param replica ContainerReplica
+   * @return True of dataNode is removed successfully else false.
+   */
+  void removeContainerReplica(ContainerID containerID, ContainerReplica replica)
+      throws ContainerNotFoundException, ContainerReplicaNotFoundException;
+
+  /**
+   * Update deleteTransactionId according to deleteTransactionMap.
+   *
+   * @param deleteTransactionMap Maps the containerId to latest delete
+   *                             transaction id for the container.
+   * @throws IOException
+   */
+  void updateDeleteTransactionId(Map<ContainerID, Long> deleteTransactionMap)
+      throws IOException;
+
+  default ContainerInfo getMatchingContainer(long size, String owner,
+                                     Pipeline pipeline) {
+    return getMatchingContainer(size, owner, pipeline, Collections.emptySet());
+  }
+
+  /**
+   * Returns ContainerInfo which matches the requirements.
+   * @param size - the amount of space required in the container
+   * @param owner - the user which requires space in its owned container
+   * @param pipeline - pipeline to which the container should belong.
+   * @param excludedContainerIDS - containerIds to be excluded.
+   * @return ContainerInfo for the matching container.
+   */
+  ContainerInfo getMatchingContainer(long size, String owner,
+                                     Pipeline pipeline,
+                                     Set<ContainerID> excludedContainerIDS);
+
+  /**
+   * Once after report processor handler completes, call this to notify
+   * container manager to increment metrics.
+   * @param isFullReport
+   * @param success
+   */
+  // Is it possible to remove this from the Interface?
+  void notifyContainerReportProcessing(boolean isFullReport, boolean success);
+
+  /**
+   * Deletes a container from SCM.
+   *
+   * @param containerID - Container ID
+   * @throws IOException
+   */
+  void deleteContainer(ContainerID containerID)
+      throws IOException;
+
+  /**
+   * Returns the list of containersIDs.
+   * @return list of containerIDs
+   */
+  Set<ContainerID> getContainerIDs();
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 7bca64f..48603c0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -27,12 +27,14 @@
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,7 +54,7 @@
       LoggerFactory.getLogger(ContainerReportHandler.class);
 
   private final NodeManager nodeManager;
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
   private final String unknownContainerHandleAction;
 
   /**
@@ -71,9 +73,10 @@
    * @param conf OzoneConfiguration instance
    */
   public ContainerReportHandler(final NodeManager nodeManager,
-                                final ContainerManager containerManager,
+                                final ContainerManagerV2 containerManager,
+                                final SCMContext scmContext,
                                 OzoneConfiguration conf) {
-    super(containerManager, LOG);
+    super(containerManager, scmContext, LOG);
     this.nodeManager = nodeManager;
     this.containerManager = containerManager;
 
@@ -86,8 +89,8 @@
   }
 
   public ContainerReportHandler(final NodeManager nodeManager,
-      final ContainerManager containerManager) {
-    this(nodeManager, containerManager, null);
+      final ContainerManagerV2 containerManager) {
+    this(nodeManager, containerManager, SCMContext.emptyContext(), null);
   }
 
   /**
@@ -120,7 +123,7 @@
 
       final Set<ContainerID> containersInDn = replicas.parallelStream()
           .map(ContainerReplicaProto::getContainerID)
-          .map(ContainerID::valueof).collect(Collectors.toSet());
+          .map(ContainerID::valueOf).collect(Collectors.toSet());
 
       final Set<ContainerID> missingReplicas = new HashSet<>(containersInSCM);
       missingReplicas.removeAll(containersInDn);
@@ -167,10 +170,10 @@
         } else if (unknownContainerHandleAction.equals(
             UNKNOWN_CONTAINER_ACTION_DELETE)) {
           final ContainerID containerId = ContainerID
-              .valueof(replicaProto.getContainerID());
+              .valueOf(replicaProto.getContainerID());
           deleteReplica(containerId, datanodeDetails, publisher, "unknown");
         }
-      } catch (IOException e) {
+      } catch (IOException | InvalidStateTransitionException e) {
         LOG.error("Exception while processing container report for container" +
                 " {} from datanode {}.", replicaProto.getContainerID(),
             datanodeDetails, e);
@@ -221,7 +224,7 @@
     for (ContainerReplicaProto replica : replicas) {
       try {
         final ContainerInfo containerInfo = containerManager.getContainer(
-            ContainerID.valueof(replica.getContainerID()));
+            ContainerID.valueOf(replica.getContainerID()));
         if (containerInfo.getDeleteTransactionId() >
             replica.getDeleteTransactionId()) {
           pendingDeleteStatusList.addPendingDeleteStatus(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index e575c60..0c3772f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -329,7 +329,7 @@
       // In Recon, while adding a 'new' CLOSED container, pipeline will be a
       // random ID, and hence be passed down as null.
       pipelineManager.addContainerToPipeline(pipeline.getId(),
-          ContainerID.valueof(containerID));
+          ContainerID.valueOf(containerID));
     }
     containerStateCount.incrementAndGet(containerInfo.getState());
   }
@@ -371,12 +371,8 @@
   void updateDeleteTransactionId(
       final Map<Long, Long> deleteTransactionMap) {
     deleteTransactionMap.forEach((k, v) -> {
-      try {
-        containers.getContainerInfo(ContainerID.valueof(k))
-            .updateDeleteTransactionId(v);
-      } catch (ContainerNotFoundException e) {
-        LOG.warn("Exception while updating delete transaction id.", e);
-      }
+      containers.getContainerInfo(ContainerID.valueOf(k))
+          .updateDeleteTransactionId(v);
     });
   }
 
@@ -432,18 +428,13 @@
   private ContainerInfo findContainerWithSpace(final long size,
       final NavigableSet<ContainerID> searchSet, final String owner,
       final PipelineID pipelineID) {
-    try {
-      // Get the container with space to meet our request.
-      for (ContainerID id : searchSet) {
-        final ContainerInfo containerInfo = containers.getContainerInfo(id);
-        if (containerInfo.getUsedBytes() + size <= this.containerSize) {
-          containerInfo.updateLastUsedTime();
-          return containerInfo;
-        }
+    // Get the container with space to meet our request.
+    for (ContainerID id : searchSet) {
+      final ContainerInfo containerInfo = containers.getContainerInfo(id);
+      if (containerInfo.getUsedBytes() + size <= this.containerSize) {
+        containerInfo.updateLastUsedTime();
+        return containerInfo;
       }
-    } catch (ContainerNotFoundException e) {
-      // This should not happen!
-      LOG.warn("Exception while finding container with space", e);
     }
     return null;
   }
@@ -496,7 +487,11 @@
    */
   ContainerInfo getContainer(final ContainerID containerID)
       throws ContainerNotFoundException {
-    return containers.getContainerInfo(containerID);
+    final ContainerInfo container = containers.getContainerInfo(containerID);
+    if (container != null) {
+      return container;
+    }
+    throw new ContainerNotFoundException(containerID.toString());
   }
 
   void close() throws IOException {
@@ -540,6 +535,9 @@
 
   void removeContainer(final ContainerID containerID)
       throws ContainerNotFoundException {
+    if (containers.getContainerInfo(containerID) == null) {
+      throw new ContainerNotFoundException(containerID.toString());
+    }
     containers.removeContainer(containerID);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
new file mode 100644
index 0000000..2fc71bc
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
@@ -0,0 +1,573 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+import java.util.EnumMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.states.ContainerState;
+import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
+import org.apache.hadoop.hdds.scm.ha.CheckedConsumer;
+import org.apache.hadoop.hdds.scm.ha.ExecutionUtil;
+import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
+import org.apache.hadoop.ozone.common.statemachine.StateMachine;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FINALIZE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.QUASI_CLOSE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLOSE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FORCE_CLOSE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.DELETE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLEANUP;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED;
+
+/**
+ * Default implementation of ContainerStateManager. This implementation
+ * holds the Container States in-memory which is backed by a persistent store.
+ * The persistent store is always kept in sync with the in-memory state changes.
+ *
+ * This class is NOT thread safe. All the calls are idempotent.
+ */
+public final class ContainerStateManagerImpl
+    implements ContainerStateManagerV2 {
+
+  /**
+   * Logger instance of ContainerStateManagerImpl.
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ContainerStateManagerImpl.class);
+
+  /**
+   * Configured container size.
+   */
+  private final long containerSize;
+
+  /**
+   * In-memory representation of Container States.
+   */
+  private ContainerStateMap containers;
+
+  /**
+   * Persistent store for Container States.
+   */
+  private Table<ContainerID, ContainerInfo> containerStore;
+
+  private final DBTransactionBuffer transactionBuffer;
+
+  /**
+   * PipelineManager instance.
+   */
+  private final PipelineManager pipelineManager;
+
+  /**
+   * Container lifecycle state machine.
+   */
+  private final StateMachine<LifeCycleState, LifeCycleEvent> stateMachine;
+
+  /**
+   * We use the containers in round-robin fashion for operations like block
+   * allocation. This map is used for remembering the last used container.
+   */
+  private ConcurrentHashMap<ContainerState, ContainerID> lastUsedMap;
+
+  private final Map<LifeCycleEvent, CheckedConsumer<ContainerInfo, IOException>>
+      containerStateChangeActions;
+
+  // Protect containers and containerStore against the potential
+  // contentions between RaftServer and ContainerManager.
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  /**
+   * constructs ContainerStateManagerImpl instance and loads the containers
+   * form the persistent storage.
+   *
+   * @param conf the Configuration
+   * @param pipelineManager the {@link PipelineManager} instance
+   * @param containerStore the persistent storage
+   * @throws IOException in case of error while loading the containers
+   */
+  private ContainerStateManagerImpl(final Configuration conf,
+      final PipelineManager pipelineManager,
+      final Table<ContainerID, ContainerInfo> containerStore,
+      final DBTransactionBuffer buffer)
+      throws IOException {
+    this.pipelineManager = pipelineManager;
+    this.containerStore = containerStore;
+    this.stateMachine = newStateMachine();
+    this.containerSize = getConfiguredContainerSize(conf);
+    this.containers = new ContainerStateMap();
+    this.lastUsedMap = new ConcurrentHashMap<>();
+    this.containerStateChangeActions = getContainerStateChangeActions();
+    this.transactionBuffer = buffer;
+
+    initialize();
+  }
+
+  /**
+   * Creates and initializes a new Container Lifecycle StateMachine.
+   *
+   * @return the Container Lifecycle StateMachine
+   */
+  private StateMachine<LifeCycleState, LifeCycleEvent> newStateMachine() {
+
+    final Set<LifeCycleState> finalStates = new HashSet<>();
+
+    // These are the steady states of a container.
+    finalStates.add(CLOSED);
+    finalStates.add(DELETED);
+
+    final StateMachine<LifeCycleState, LifeCycleEvent> containerLifecycleSM =
+        new StateMachine<>(OPEN, finalStates);
+
+    containerLifecycleSM.addTransition(OPEN, CLOSING, FINALIZE);
+    containerLifecycleSM.addTransition(CLOSING, QUASI_CLOSED, QUASI_CLOSE);
+    containerLifecycleSM.addTransition(CLOSING, CLOSED, CLOSE);
+    containerLifecycleSM.addTransition(QUASI_CLOSED, CLOSED, FORCE_CLOSE);
+    containerLifecycleSM.addTransition(CLOSED, DELETING, DELETE);
+    containerLifecycleSM.addTransition(DELETING, DELETED, CLEANUP);
+
+    /* The following set of transitions are to make state machine
+     * transition idempotent.
+     */
+    makeStateTransitionIdempotent(containerLifecycleSM, FINALIZE,
+        CLOSING, QUASI_CLOSED, CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, QUASI_CLOSE,
+        QUASI_CLOSED, CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, CLOSE,
+        CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, FORCE_CLOSE,
+        CLOSED, DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, DELETE,
+        DELETING, DELETED);
+    makeStateTransitionIdempotent(containerLifecycleSM, CLEANUP, DELETED);
+
+    return containerLifecycleSM;
+  }
+
+  private void makeStateTransitionIdempotent(
+      final StateMachine<LifeCycleState, LifeCycleEvent> sm,
+      final LifeCycleEvent event, final LifeCycleState... states) {
+    for (LifeCycleState state : states) {
+      sm.addTransition(state, state, event);
+    }
+  }
+
+  /**
+   * Returns the configured container size.
+   *
+   * @return the max size of container
+   */
+  private long getConfiguredContainerSize(final Configuration conf) {
+    return (long) conf.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
+  }
+
+  /**
+   * Loads the containers from container store into memory.
+   *
+   * @throws IOException in case of error while loading the containers
+   */
+  private void initialize() throws IOException {
+    TableIterator<ContainerID, ? extends KeyValue<ContainerID, ContainerInfo>>
+        iterator = containerStore.iterator();
+
+    while (iterator.hasNext()) {
+      final ContainerInfo container = iterator.next().getValue();
+      Preconditions.checkNotNull(container);
+      containers.addContainer(container);
+      if (container.getState() == LifeCycleState.OPEN) {
+        try {
+          pipelineManager.addContainerToPipeline(container.getPipelineID(),
+              container.containerID());
+        } catch (PipelineNotFoundException ex) {
+          LOG.warn("Found container {} which is in OPEN state with " +
+                  "pipeline {} that does not exist. Marking container for " +
+                  "closing.", container, container.getPipelineID());
+          try {
+            updateContainerState(container.containerID().getProtobuf(),
+                LifeCycleEvent.FINALIZE);
+          } catch (InvalidStateTransitionException e) {
+            // This cannot happen.
+            LOG.warn("Unable to finalize Container {}.", container);
+          }
+        }
+      }
+    }
+  }
+
+  private Map<LifeCycleEvent, CheckedConsumer<ContainerInfo, IOException>>
+      getContainerStateChangeActions() {
+    final Map<LifeCycleEvent, CheckedConsumer<ContainerInfo, IOException>>
+        actions = new EnumMap<>(LifeCycleEvent.class);
+    actions.put(FINALIZE, info -> pipelineManager
+        .removeContainerFromPipeline(info.getPipelineID(), info.containerID()));
+    return actions;
+  }
+
+  @Override
+  public Set<ContainerID> getContainerIDs() {
+    lock.readLock().lock();
+    try {
+      return containers.getAllContainerIDs();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public Set<ContainerID> getContainerIDs(final LifeCycleState state) {
+    lock.readLock().lock();
+    try {
+      return containers.getContainerIDsByState(state);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public ContainerInfo getContainer(final HddsProtos.ContainerID id) {
+    lock.readLock().lock();
+    try {
+      return containers.getContainerInfo(ContainerID.getFromProtobuf(id));
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void addContainer(final ContainerInfoProto containerInfo)
+      throws IOException {
+
+    // Change the exception thrown to PipelineNotFound and
+    // ClosedPipelineException once ClosedPipelineException is introduced
+    // in PipelineManager.
+
+    Preconditions.checkNotNull(containerInfo);
+    final ContainerInfo container = ContainerInfo.fromProtobuf(containerInfo);
+    final ContainerID containerID = container.containerID();
+    final PipelineID pipelineID = container.getPipelineID();
+
+    lock.writeLock().lock();
+    try {
+      if (!containers.contains(containerID)) {
+        ExecutionUtil.create(() -> {
+          transactionBuffer.addToBuffer(containerStore, containerID, container);
+          containers.addContainer(container);
+          pipelineManager.addContainerToPipeline(pipelineID, containerID);
+        }).onException(() -> {
+          containers.removeContainer(containerID);
+          transactionBuffer.removeFromBuffer(containerStore, containerID);
+        }).execute();
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public boolean contains(final HddsProtos.ContainerID id) {
+    lock.readLock().lock();
+    try {
+      // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
+      return containers.contains(ContainerID.getFromProtobuf(id));
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void updateContainerState(final HddsProtos.ContainerID containerID,
+                                   final LifeCycleEvent event)
+      throws IOException, InvalidStateTransitionException {
+    // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
+    final ContainerID id = ContainerID.getFromProtobuf(containerID);
+
+    lock.writeLock().lock();
+    try {
+      if (containers.contains(id)) {
+        final ContainerInfo oldInfo = containers.getContainerInfo(id);
+        final LifeCycleState oldState = oldInfo.getState();
+        final LifeCycleState newState = stateMachine.getNextState(
+            oldInfo.getState(), event);
+        if (newState.getNumber() > oldState.getNumber()) {
+          ExecutionUtil.create(() -> {
+            containers.updateState(id, oldState, newState);
+            transactionBuffer.addToBuffer(containerStore, id,
+                containers.getContainerInfo(id));
+          }).onException(() -> {
+            transactionBuffer.addToBuffer(containerStore, id, oldInfo);
+            containers.updateState(id, newState, oldState);
+          }).execute();
+          containerStateChangeActions.getOrDefault(event, info -> {
+          }).execute(oldInfo);
+        }
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+
+  @Override
+  public Set<ContainerReplica> getContainerReplicas(
+      final HddsProtos.ContainerID id) {
+    lock.readLock().lock();
+    try {
+      return containers.getContainerReplicas(
+          ContainerID.getFromProtobuf(id));
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void updateContainerReplica(final HddsProtos.ContainerID id,
+                                     final ContainerReplica replica) {
+    lock.writeLock().lock();
+    try {
+      containers.updateContainerReplica(ContainerID.getFromProtobuf(id),
+          replica);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void removeContainerReplica(final HddsProtos.ContainerID id,
+                                     final ContainerReplica replica) {
+    lock.writeLock().lock();
+    try {
+      containers.removeContainerReplica(ContainerID.getFromProtobuf(id),
+          replica);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void updateDeleteTransactionId(
+      final Map<ContainerID, Long> deleteTransactionMap) throws IOException {
+    lock.writeLock().lock();
+    try {
+      // TODO: Refactor this. Error handling is not done.
+      for (Map.Entry<ContainerID, Long> transaction :
+          deleteTransactionMap.entrySet()) {
+        final ContainerInfo info = containers.getContainerInfo(
+            transaction.getKey());
+        info.updateDeleteTransactionId(transaction.getValue());
+        containerStore.put(info.containerID(), info);
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  public ContainerInfo getMatchingContainer(final long size, String owner,
+      PipelineID pipelineID, NavigableSet<ContainerID> containerIDs) {
+    if (containerIDs.isEmpty()) {
+      return null;
+    }
+
+    // Get the last used container and find container above the last used
+    // container ID.
+    final ContainerState key = new ContainerState(owner, pipelineID);
+    final ContainerID lastID =
+        lastUsedMap.getOrDefault(key, containerIDs.first());
+
+    // There is a small issue here. The first time, we will skip the first
+    // container. But in most cases it will not matter.
+    NavigableSet<ContainerID> resultSet = containerIDs.tailSet(lastID, false);
+    if (resultSet.isEmpty()) {
+      resultSet = containerIDs;
+    }
+
+    lock.readLock().lock();
+    try {
+      ContainerInfo selectedContainer = findContainerWithSpace(size, resultSet);
+      if (selectedContainer == null) {
+
+        // If we did not find any space in the tailSet, we need to look for
+        // space in the headset, we need to pass true to deal with the
+        // situation that we have a lone container that has space. That is we
+        // ignored the last used container under the assumption we can find
+        // other containers with space, but if have a single container that is
+        // not true. Hence we need to include the last used container as the
+        // last element in the sorted set.
+
+        resultSet = containerIDs.headSet(lastID, true);
+        selectedContainer = findContainerWithSpace(size, resultSet);
+      }
+
+      // TODO: cleanup entries in lastUsedMap
+      if (selectedContainer != null) {
+        lastUsedMap.put(key, selectedContainer.containerID());
+      }
+      return selectedContainer;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  private ContainerInfo findContainerWithSpace(final long size,
+      final NavigableSet<ContainerID> searchSet) {
+    // Get the container with space to meet our request.
+    for (ContainerID id : searchSet) {
+      final ContainerInfo containerInfo = containers.getContainerInfo(id);
+      if (containerInfo.getUsedBytes() + size <= this.containerSize) {
+        containerInfo.updateLastUsedTime();
+        return containerInfo;
+      }
+    }
+    return null;
+  }
+
+
+  public void removeContainer(final HddsProtos.ContainerID id)
+      throws IOException {
+    lock.writeLock().lock();
+    try {
+      final ContainerID cid = ContainerID.getFromProtobuf(id);
+      final ContainerInfo containerInfo = containers.getContainerInfo(cid);
+      ExecutionUtil.create(() -> {
+        transactionBuffer.removeFromBuffer(containerStore, cid);
+        containers.removeContainer(cid);
+      }).onException(() -> containerStore.put(cid, containerInfo)).execute();
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void reinitialize(
+      Table<ContainerID, ContainerInfo> store) throws IOException {
+    lock.writeLock().lock();
+    try {
+      close();
+      this.containerStore = store;
+      this.containers = new ContainerStateMap();
+      this.lastUsedMap = new ConcurrentHashMap<>();
+      initialize();
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      containerStore.close();
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for ContainerStateManager.
+   */
+  public static class Builder {
+    private Configuration conf;
+    private PipelineManager pipelineMgr;
+    private SCMRatisServer scmRatisServer;
+    private Table<ContainerID, ContainerInfo> table;
+    private DBTransactionBuffer transactionBuffer;
+
+    public Builder setSCMDBTransactionBuffer(DBTransactionBuffer buffer) {
+      this.transactionBuffer = buffer;
+      return this;
+    }
+    public Builder setConfiguration(final Configuration config) {
+      conf = config;
+      return this;
+    }
+
+    public Builder setPipelineManager(final PipelineManager pipelineManager) {
+      pipelineMgr = pipelineManager;
+      return this;
+    }
+
+    public Builder setRatisServer(final SCMRatisServer ratisServer) {
+      scmRatisServer = ratisServer;
+      return this;
+    }
+
+    public Builder setContainerStore(
+        final Table<ContainerID, ContainerInfo> containerStore) {
+      table = containerStore;
+      return this;
+    }
+
+    public ContainerStateManagerV2 build() throws IOException {
+      Preconditions.checkNotNull(conf);
+      Preconditions.checkNotNull(pipelineMgr);
+      Preconditions.checkNotNull(table);
+
+      final ContainerStateManagerV2 csm = new ContainerStateManagerImpl(
+          conf, pipelineMgr, table, transactionBuffer);
+
+      final SCMHAInvocationHandler invocationHandler =
+          new SCMHAInvocationHandler(RequestType.CONTAINER, csm,
+              scmRatisServer);
+
+      return (ContainerStateManagerV2) Proxy.newProxyInstance(
+          SCMHAInvocationHandler.class.getClassLoader(),
+          new Class<?>[]{ContainerStateManagerV2.class}, invocationHandler);
+    }
+
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
new file mode 100644
index 0000000..276e21c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
+
+/**
+ * A ContainerStateManager is responsible for keeping track of all the
+ * container and its state inside SCM, it also exposes methods to read and
+ * modify the container and its state.
+ *
+ * All the mutation operations are marked with {@link Replicate} annotation so
+ * that when SCM-HA is enabled, the mutations are replicated from leader SCM
+ * to the followers.
+ *
+ * When a method is marked with {@link Replicate} annotation it should follow
+ * the below rules.
+ *
+ * 1. The method call should be Idempotent
+ * 2. Arguments should be of protobuf objects
+ * 3. Return type should be of protobuf object
+ * 4. The declaration should throw RaftException
+ *
+ */
+public interface ContainerStateManagerV2 {
+
+  //TODO: Rename this to ContainerStateManager
+
+  /* **********************************************************************
+   * Container Life Cycle                                                 *
+   *                                                                      *
+   * Event and State Transition Mapping:                                  *
+   *                                                                      *
+   * State: OPEN         ----------------> CLOSING                        *
+   * Event:                    FINALIZE                                   *
+   *                                                                      *
+   * State: CLOSING      ----------------> QUASI_CLOSED                   *
+   * Event:                  QUASI_CLOSE                                  *
+   *                                                                      *
+   * State: CLOSING      ----------------> CLOSED                         *
+   * Event:                     CLOSE                                     *
+   *                                                                      *
+   * State: QUASI_CLOSED ----------------> CLOSED                         *
+   * Event:                  FORCE_CLOSE                                  *
+   *                                                                      *
+   * State: CLOSED       ----------------> DELETING                       *
+   * Event:                    DELETE                                     *
+   *                                                                      *
+   * State: DELETING     ----------------> DELETED                        *
+   * Event:                    CLEANUP                                    *
+   *                                                                      *
+   *                                                                      *
+   * Container State Flow:                                                *
+   *                                                                      *
+   * [OPEN]--------------->[CLOSING]--------------->[QUASI_CLOSED]        *
+   *          (FINALIZE)      |      (QUASI_CLOSE)        |               *
+   *                          |                           |               *
+   *                          |                           |               *
+   *                  (CLOSE) |             (FORCE_CLOSE) |               *
+   *                          |                           |               *
+   *                          |                           |               *
+   *                          +--------->[CLOSED]<--------+               *
+   *                                        |                             *
+   *                                (DELETE)|                             *
+   *                                        |                             *
+   *                                        |                             *
+   *                                   [DELETING]                         *
+   *                                        |                             *
+   *                              (CLEANUP) |                             *
+   *                                        |                             *
+   *                                        V                             *
+   *                                    [DELETED]                         *
+   *                                                                      *
+   ************************************************************************/
+
+  /**
+   *
+   */
+  boolean contains(HddsProtos.ContainerID containerID);
+
+  /**
+   * Returns the ID of all the managed containers.
+   *
+   * @return Set of {@link ContainerID}
+   */
+  Set<ContainerID> getContainerIDs();
+
+  /**
+   *
+   */
+  Set<ContainerID> getContainerIDs(LifeCycleState state);
+
+  /**
+   *
+   */
+  ContainerInfo getContainer(HddsProtos.ContainerID id);
+
+  /**
+   *
+   */
+  Set<ContainerReplica> getContainerReplicas(HddsProtos.ContainerID id);
+
+  /**
+   *
+   */
+  void updateContainerReplica(HddsProtos.ContainerID id,
+                              ContainerReplica replica);
+
+  /**
+   *
+   */
+  void removeContainerReplica(HddsProtos.ContainerID id,
+                              ContainerReplica replica);
+
+  /**
+   *
+   */
+  @Replicate
+  void addContainer(ContainerInfoProto containerInfo)
+      throws IOException;
+
+  /**
+   *
+   */
+  @Replicate
+  void updateContainerState(HddsProtos.ContainerID id,
+                            HddsProtos.LifeCycleEvent event)
+      throws IOException, InvalidStateTransitionException;
+
+  /**
+   *
+   */
+  // Make this as @Replicate
+  void updateDeleteTransactionId(Map<ContainerID, Long> deleteTransactionMap)
+      throws IOException;
+
+  /**
+   *
+   */
+  ContainerInfo getMatchingContainer(long size, String owner,
+                                     PipelineID pipelineID,
+                                     NavigableSet<ContainerID> containerIDs);
+
+  /**
+   *
+   */
+  @Replicate
+  void removeContainer(HddsProtos.ContainerID containerInfo)
+      throws IOException;
+
+  /**
+   * Reinitialize the ContainerStateManager with container store.
+   * @param containerStore container table.
+   * @throws IOException
+   */
+  void reinitialize(Table<ContainerID, ContainerInfo> containerStore)
+      throws IOException;
+
+  /**
+   *
+   */
+  void close() throws IOException;
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index 5ecbed3..1ad507d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -23,12 +23,14 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos
     .ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -46,8 +48,9 @@
 
   public IncrementalContainerReportHandler(
       final NodeManager nodeManager,
-      final ContainerManager containerManager)  {
-    super(containerManager, LOG);
+      final ContainerManagerV2 containerManager,
+      final SCMContext scmContext) {
+    super(containerManager, scmContext, LOG);
     this.nodeManager = nodeManager;
   }
 
@@ -71,7 +74,7 @@
     for (ContainerReplicaProto replicaProto :
         report.getReport().getReportList()) {
       try {
-        final ContainerID id = ContainerID.valueof(
+        final ContainerID id = ContainerID.valueOf(
             replicaProto.getContainerID());
         if (!replicaProto.getState().equals(
             ContainerReplicaProto.State.DELETED)) {
@@ -89,7 +92,7 @@
         success = false;
         LOG.warn("Container {} replica not found!",
             replicaProto.getContainerID());
-      } catch (IOException e) {
+      } catch (IOException | InvalidStateTransitionException e) {
         success = false;
         LOG.error("Exception while processing ICR for container {}",
             replicaProto.getContainerID(), e);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index bd06cbf..2d757d7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -22,6 +22,7 @@
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -31,13 +32,18 @@
 import java.util.StringJoiner;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Consumer;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -46,16 +52,18 @@
 import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMService;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.lock.LockManager;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
@@ -70,6 +78,8 @@
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
 import static org.apache.hadoop.hdds.conf.ConfigTag.SCM;
+
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.apache.ratis.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -79,8 +89,7 @@
  * that the containers are properly replicated. Replication Manager deals only
  * with Quasi Closed / Closed container.
  */
-public class ReplicationManager
-    implements MetricsSource, EventHandler<SafeModeStatus> {
+public class ReplicationManager implements MetricsSource, SCMService {
 
   public static final Logger LOG =
       LoggerFactory.getLogger(ReplicationManager.class);
@@ -90,7 +99,7 @@
   /**
    * Reference to the ContainerManager.
    */
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
 
   /**
    * PlacementPolicy which is used to identify where a container
@@ -104,6 +113,11 @@
   private final EventPublisher eventPublisher;
 
   /**
+   * SCMContext from StorageContainerManager.
+   */
+  private final SCMContext scmContext;
+
+  /**
    * Used for locking a container using its ID while processing it.
    */
   private final LockManager<ContainerID> lockManager;
@@ -128,7 +142,7 @@
   /**
    * ReplicationManager specific configuration.
    */
-  private final ReplicationManagerConfiguration conf;
+  private final ReplicationManagerConfiguration rmConf;
 
   /**
    * ReplicationMonitor thread is the one which wakes up at configured
@@ -148,6 +162,16 @@
   private int minHealthyForMaintenance;
 
   /**
+   * SCMService related variables.
+   * After leaving safe mode, replicationMonitor needs to wait for a while
+   * before really take effect.
+   */
+  private final Lock serviceLock = new ReentrantLock();
+  private ServiceStatus serviceStatus = ServiceStatus.PAUSING;
+  private final long waitTimeInMillis;
+  private long lastTimeToBeReadyInMillis = 0;
+
+  /**
    * Constructs ReplicationManager instance with the given configuration.
    *
    * @param conf OzoneConfiguration
@@ -155,27 +179,43 @@
    * @param containerPlacement PlacementPolicy
    * @param eventPublisher EventPublisher
    */
-  public ReplicationManager(final ReplicationManagerConfiguration conf,
-                            final ContainerManager containerManager,
+  @SuppressWarnings("parameternumber")
+  public ReplicationManager(final ConfigurationSource conf,
+                            final ContainerManagerV2 containerManager,
                             final PlacementPolicy containerPlacement,
                             final EventPublisher eventPublisher,
+                            final SCMContext scmContext,
+                            final SCMServiceManager serviceManager,
                             final LockManager<ContainerID> lockManager,
                             final NodeManager nodeManager) {
     this.containerManager = containerManager;
     this.containerPlacement = containerPlacement;
     this.eventPublisher = eventPublisher;
+    this.scmContext = scmContext;
     this.lockManager = lockManager;
     this.nodeManager = nodeManager;
-    this.conf = conf;
+    this.rmConf = conf.getObject(ReplicationManagerConfiguration.class);
     this.running = false;
     this.inflightReplication = new ConcurrentHashMap<>();
     this.inflightDeletion = new ConcurrentHashMap<>();
-    this.minHealthyForMaintenance = conf.getMaintenanceReplicaMinimum();
+    this.minHealthyForMaintenance = rmConf.getMaintenanceReplicaMinimum();
+
+    this.waitTimeInMillis = conf.getTimeDuration(
+        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
+        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+
+    // register ReplicationManager to SCMServiceManager.
+    serviceManager.register(this);
+
+    // start ReplicationManager.
+    start();
   }
 
   /**
    * Starts Replication Monitor thread.
    */
+  @Override
   public synchronized void start() {
 
     if (!isRunning()) {
@@ -243,15 +283,15 @@
     try {
       while (running) {
         final long start = Time.monotonicNow();
-        final Set<ContainerID> containerIds =
-            containerManager.getContainerIDs();
-        containerIds.forEach(this::processContainer);
+        final List<ContainerInfo> containers =
+            containerManager.getContainers();
+        containers.forEach(this::processContainer);
 
         LOG.info("Replication Monitor Thread took {} milliseconds for" +
                 " processing {} containers.", Time.monotonicNow() - start,
-            containerIds.size());
+            containers.size());
 
-        wait(conf.getInterval());
+        wait(rmConf.getInterval());
       }
     } catch (Throwable t) {
       // When we get runtime exception, we should terminate SCM.
@@ -263,12 +303,16 @@
   /**
    * Process the given container.
    *
-   * @param id ContainerID
+   * @param container ContainerInfo
    */
-  private void processContainer(ContainerID id) {
-    lockManager.writeLock(id);
+  private void processContainer(ContainerInfo container) {
+    if (!shouldRun()) {
+      return;
+    }
+
+    final ContainerID id = container.containerID();
+    lockManager.lock(id);
     try {
-      final ContainerInfo container = containerManager.getContainer(id);
       final Set<ContainerReplica> replicas = containerManager
           .getContainerReplicas(container.containerID());
       final LifeCycleState state = container.getState();
@@ -407,7 +451,7 @@
       final Map<ContainerID, List<InflightAction>> inflightActions,
       final Predicate<InflightAction> filter) {
     final ContainerID id = container.containerID();
-    final long deadline = Time.monotonicNow() - conf.getEventTimeout();
+    final long deadline = Time.monotonicNow() - rmConf.getEventTimeout();
     if (inflightActions.containsKey(id)) {
       final List<InflightAction> actions = inflightActions.get(id);
 
@@ -554,7 +598,8 @@
    * @param replicas Set of ContainerReplicas
    */
   private void deleteContainerReplicas(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) throws IOException {
+      final Set<ContainerReplica> replicas) throws IOException,
+      InvalidStateTransitionException {
     Preconditions.assertTrue(container.getState() ==
         LifeCycleState.CLOSED);
     Preconditions.assertTrue(container.getNumberOfKeys() == 0);
@@ -578,7 +623,8 @@
    * @param replicas Set of ContainerReplicas
    */
   private void handleContainerUnderDelete(final ContainerInfo container,
-      final Set<ContainerReplica> replicas) throws IOException {
+      final Set<ContainerReplica> replicas) throws IOException,
+      InvalidStateTransitionException {
     if (replicas.size() == 0) {
       containerManager.updateContainerState(container.containerID(),
           HddsProtos.LifeCycleEvent.CLEANUP);
@@ -781,6 +827,12 @@
 
       final List<ContainerReplica> eligibleReplicas = new ArrayList<>(replicas);
 
+      // Iterate replicas in deterministic order to avoid potential data loss.
+      // See https://issues.apache.org/jira/browse/HDDS-4589.
+      // N.B., sort replicas by (containerID, datanodeDetails).
+      eligibleReplicas.sort(
+          Comparator.comparingLong(ContainerReplica::hashCode));
+
       final Map<UUID, ContainerReplica> uniqueReplicas =
           new LinkedHashMap<>();
 
@@ -947,10 +999,16 @@
 
     LOG.info("Sending close container command for container {}" +
             " to datanode {}.", container.containerID(), datanode);
-
     CloseContainerCommand closeContainerCommand =
         new CloseContainerCommand(container.getContainerID(),
             container.getPipelineID(), force);
+    try {
+      closeContainerCommand.setTerm(scmContext.getTermOfLeader());
+    } catch (NotLeaderException nle) {
+      LOG.warn("Skip sending close container command,"
+          + " since current SCM is not leader.", nle);
+      return;
+    }
     eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
         new CommandForDatanode<>(datanode.getUuid(), closeContainerCommand));
   }
@@ -1017,6 +1075,13 @@
       final DatanodeDetails datanode,
       final SCMCommand<T> command,
       final Consumer<InflightAction> tracker) {
+    try {
+      command.setTerm(scmContext.getTermOfLeader());
+    } catch (NotLeaderException nle) {
+      LOG.warn("Skip sending datanode command,"
+          + " since current SCM is not leader.", nle);
+      return;
+    }
     final CommandForDatanode<T> datanodeCommand =
         new CommandForDatanode<>(datanode.getUuid(), command);
     eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand);
@@ -1088,14 +1153,6 @@
         .endRecord();
   }
 
-  @Override
-  public void onMessage(SafeModeStatus status,
-      EventPublisher publisher) {
-    if (!status.isInSafeMode() && !this.isRunning()) {
-      this.start();
-    }
-  }
-
   /**
    * Wrapper class to hold the InflightAction with its start time.
    */
@@ -1209,4 +1266,42 @@
           .toString();
     }
   }
+
+  @Override
+  public void notifyStatusChanged() {
+    serviceLock.lock();
+    try {
+      // 1) SCMContext#isLeader returns true.
+      // 2) not in safe mode.
+      if (scmContext.isLeader() && !scmContext.isInSafeMode()) {
+        // transition from PAUSING to RUNNING
+        if (serviceStatus != ServiceStatus.RUNNING) {
+          LOG.info("Service {} transitions to RUNNING.", getServiceName());
+          lastTimeToBeReadyInMillis = Time.monotonicNow();
+          serviceStatus = ServiceStatus.RUNNING;
+        }
+      } else {
+        serviceStatus = ServiceStatus.PAUSING;
+      }
+    } finally {
+      serviceLock.unlock();
+    }
+  }
+
+  @Override
+  public boolean shouldRun() {
+    serviceLock.lock();
+    try {
+      // If safe mode is off, then this SCMService starts to run with a delay.
+      return serviceStatus == ServiceStatus.RUNNING &&
+          Time.monotonicNow() - lastTimeToBeReadyInMillis >= waitTimeInMillis;
+    } finally {
+      serviceLock.unlock();
+    }
+  }
+
+  @Override
+  public String getServiceName() {
+    return ReplicationManager.class.getSimpleName();
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 849a891..20592bb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -130,7 +130,7 @@
       try {
         if (container.getState() == LifeCycleState.OPEN) {
           pipelineManager.addContainerToPipeline(container.getPipelineID(),
-              ContainerID.valueof(container.getContainerID()));
+              ContainerID.valueOf(container.getContainerID()));
         }
       } catch (PipelineNotFoundException ex) {
         LOG.warn("Found a Container {} which is in {} state with pipeline {} " +
@@ -216,7 +216,9 @@
   public boolean exists(ContainerID containerID) {
     lock.lock();
     try {
-      return (containerStateManager.getContainer(containerID) != null);
+      Preconditions.checkNotNull(
+          containerStateManager.getContainer(containerID));
+      return true;
     } catch (ContainerNotFoundException e) {
       return false;
     } finally {
@@ -303,7 +305,7 @@
         // PipelineStateManager.
         pipelineManager.removeContainerFromPipeline(
             containerInfo.getPipelineID(),
-            new ContainerID(containerInfo.getContainerID()));
+            containerInfo.containerID());
         throw ex;
       }
       return containerInfo;
@@ -400,6 +402,7 @@
     }
   }
 
+
   /**
    * Update deleteTransactionId according to deleteTransactionMap.
    *
@@ -418,7 +421,8 @@
     try(BatchOperation batchOperation = batchHandler.initBatchOperation()) {
       for (Map.Entry< Long, Long > entry : deleteTransactionMap.entrySet()) {
         long containerID = entry.getKey();
-        ContainerID containerIdObject = new ContainerID(containerID);
+
+        ContainerID containerIdObject = ContainerID.valueOf(containerID);
         ContainerInfo containerInfo =
             containerStore.get(containerIdObject);
         try {
@@ -518,7 +522,7 @@
       throws IOException {
     try {
       containerStore
-          .put(new ContainerID(containerInfo.getContainerID()), containerInfo);
+          .put(containerInfo.containerID(), containerInfo);
       // Incrementing here, as allocateBlock to create a container calls
       // getMatchingContainer() and finally calls this API to add newly
       // created container to DB.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
index 8500063..08be71f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
@@ -132,7 +132,7 @@
    * @return true or false
    */
   public boolean hasContainerID(T key, int id) {
-    return hasContainerID(key, ContainerID.valueof(id));
+    return hasContainerID(key, ContainerID.valueOf(id));
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 7cb2a6b..a76c851 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,31 +18,27 @@
 
 package org.apache.hadoop.hdds.scm.container.states;
 
-import com.google.common.base.Preconditions;
-
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.util.Set;
 import java.util.Collections;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.TreeSet;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.ConcurrentHashMap;
 
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .CONTAINER_EXISTS;
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
     .FAILED_TO_CHANGE_CONTAINER_STATE;
 
@@ -76,6 +72,8 @@
  * select a container that belongs to user1, with Ratis replication which can
  * make 3 copies of data. The fact that we will look for open containers by
  * default and if we cannot find them we will add new containers.
+ *
+ * All the calls are idempotent.
  */
 public class ContainerStateMap {
   private static final Logger LOG =
@@ -92,11 +90,6 @@
   private final Map<ContainerID, Set<ContainerReplica>> replicaMap;
   private final Map<ContainerQueryKey, NavigableSet<ContainerID>> resultCache;
 
-  // Container State Map lock should be held before calling into
-  // Update ContainerAttributes. The consistency of ContainerAttributes is
-  // protected by this lock.
-  private final ReadWriteLock lock;
-
   /**
    * Create a ContainerStateMap.
    */
@@ -106,7 +99,6 @@
     this.factorMap = new ContainerAttribute<>();
     this.typeMap = new ContainerAttribute<>();
     this.containerMap = new ConcurrentHashMap<>();
-    this.lock = new ReentrantReadWriteLock();
     this.replicaMap = new ConcurrentHashMap<>();
     this.resultCache = new ConcurrentHashMap<>();
   }
@@ -120,19 +112,9 @@
   public void addContainer(final ContainerInfo info)
       throws SCMException {
     Preconditions.checkNotNull(info, "Container Info cannot be null");
-    Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0,
-        "ExpectedReplicaCount should be greater than 0");
-
-    lock.writeLock().lock();
-    try {
-      final ContainerID id = info.containerID();
-      if (containerMap.putIfAbsent(id, info) != null) {
-        LOG.debug("Duplicate container ID detected. {}", id);
-        throw new
-            SCMException("Duplicate container ID detected.",
-            CONTAINER_EXISTS);
-      }
-
+    final ContainerID id = info.containerID();
+    if (!contains(id)) {
+      containerMap.put(id, info);
       lifeCycleStateMap.insert(info.getState(), id);
       ownerMap.insert(info.getOwner(), id);
       factorMap.insert(info.getReplicationFactor(), id);
@@ -142,36 +124,32 @@
       // Flush the cache of this container type, will be added later when
       // get container queries are executed.
       flushCache(info);
-      LOG.trace("Created container with {} successfully.", id);
-    } finally {
-      lock.writeLock().unlock();
+      LOG.trace("Container {} added to ContainerStateMap.", id);
     }
   }
 
+  public boolean contains(final ContainerID id) {
+    return containerMap.containsKey(id);
+  }
+
   /**
    * Removes a Container Entry from ContainerStateMap.
    *
-   * @param containerID - ContainerID
-   * @throws SCMException - throws if create failed.
+   * @param id - ContainerID
    */
-  public void removeContainer(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    Preconditions.checkNotNull(containerID, "ContainerID cannot be null");
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
+  public void removeContainer(final ContainerID id) {
+    Preconditions.checkNotNull(id, "ContainerID cannot be null");
+    if (contains(id)) {
       // Should we revert back to the original state if any of the below
       // remove operation fails?
-      final ContainerInfo info = containerMap.remove(containerID);
-      lifeCycleStateMap.remove(info.getState(), containerID);
-      ownerMap.remove(info.getOwner(), containerID);
-      factorMap.remove(info.getReplicationFactor(), containerID);
-      typeMap.remove(info.getReplicationType(), containerID);
+      final ContainerInfo info = containerMap.remove(id);
+      lifeCycleStateMap.remove(info.getState(), id);
+      ownerMap.remove(info.getOwner(), id);
+      factorMap.remove(info.getReplicationFactor(), id);
+      typeMap.remove(info.getReplicationType(), id);
       // Flush the cache of this container type.
       flushCache(info);
-      LOG.trace("Removed container with {} successfully.", containerID);
-    } finally {
-      lock.writeLock().unlock();
+      LOG.trace("Container {} removed from ContainerStateMap.", id);
     }
   }
 
@@ -179,37 +157,24 @@
    * Returns the latest state of Container from SCM's Container State Map.
    *
    * @param containerID - ContainerID
-   * @return container info, if found.
+   * @return container info, if found else null.
    */
-  public ContainerInfo getContainerInfo(final ContainerID containerID)
-      throws ContainerNotFoundException {
-    lock.readLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      return containerMap.get(containerID);
-    } finally {
-      lock.readLock().unlock();
-    }
+  public ContainerInfo getContainerInfo(final ContainerID containerID) {
+    return containerMap.get(containerID);
   }
 
   /**
    * Returns the latest list of DataNodes where replica for given containerId
-   * exist. Throws an SCMException if no entry is found for given containerId.
+   * exist.
    *
    * @param containerID
    * @return Set<DatanodeDetails>
    */
   public Set<ContainerReplica> getContainerReplicas(
-      final ContainerID containerID) throws ContainerNotFoundException {
+      final ContainerID containerID) {
     Preconditions.checkNotNull(containerID);
-    lock.readLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      return Collections
-          .unmodifiableSet(replicaMap.get(containerID));
-    } finally {
-      lock.readLock().unlock();
-    }
+    final Set<ContainerReplica> replicas = replicaMap.get(containerID);
+    return replicas == null ? null : Collections.unmodifiableSet(replicas);
   }
 
   /**
@@ -221,16 +186,12 @@
    * @param replica
    */
   public void updateContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica) throws ContainerNotFoundException {
+      final ContainerReplica replica) {
     Preconditions.checkNotNull(containerID);
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      Set<ContainerReplica> replicas = replicaMap.get(containerID);
+    if (contains(containerID)) {
+      final Set<ContainerReplica> replicas = replicaMap.get(containerID);
       replicas.remove(replica);
       replicas.add(replica);
-    } finally {
-      lock.writeLock().unlock();
     }
   }
 
@@ -242,21 +203,11 @@
    * @return True of dataNode is removed successfully else false.
    */
   public void removeContainerReplica(final ContainerID containerID,
-      final ContainerReplica replica)
-      throws ContainerNotFoundException, ContainerReplicaNotFoundException {
+      final ContainerReplica replica) {
     Preconditions.checkNotNull(containerID);
     Preconditions.checkNotNull(replica);
-
-    lock.writeLock().lock();
-    try {
-      checkIfContainerExist(containerID);
-      if(!replicaMap.get(containerID).remove(replica)) {
-        throw new ContainerReplicaNotFoundException(
-            "Container #"
-                + containerID.getId() + ", replica: " + replica);
-      }
-    } finally {
-      lock.writeLock().unlock();
+    if (contains(containerID)) {
+      replicaMap.get(containerID).remove(replica);
     }
   }
 
@@ -264,17 +215,13 @@
    * Just update the container State.
    * @param info ContainerInfo.
    */
-  public void updateContainerInfo(final ContainerInfo info)
-      throws ContainerNotFoundException {
-    lock.writeLock().lock();
-    try {
-      Preconditions.checkNotNull(info);
-      checkIfContainerExist(info.containerID());
-      final ContainerInfo currentInfo = containerMap.get(info.containerID());
+  public void updateContainerInfo(final ContainerInfo info) {
+    Preconditions.checkNotNull(info);
+    final ContainerID id = info.containerID();
+    if (contains(id)) {
+      final ContainerInfo currentInfo = containerMap.get(id);
       flushCache(info, currentInfo);
-      containerMap.put(info.containerID(), info);
-    } finally {
-      lock.writeLock().unlock();
+      containerMap.put(id, info);
     }
   }
 
@@ -287,61 +234,59 @@
    * @throws SCMException - in case of failure.
    */
   public void updateState(ContainerID containerID, LifeCycleState currentState,
-      LifeCycleState newState) throws SCMException, ContainerNotFoundException {
+      LifeCycleState newState) throws SCMException {
     Preconditions.checkNotNull(currentState);
     Preconditions.checkNotNull(newState);
-    lock.writeLock().lock();
+    if (!contains(containerID)) {
+      return;
+    }
+    // Return if updating state not changed
+    if (currentState == newState) {
+      LOG.debug("CurrentState and NewState are the same, return from " +
+          "updateState directly.");
+      return;
+    }
+    // TODO: Simplify this logic.
+    final ContainerInfo currentInfo = containerMap.get(containerID);
     try {
-      checkIfContainerExist(containerID);
-      // Return if updating state not changed
-      if (currentState == newState) {
-        LOG.debug("CurrentState and NewState are the same, return from " +
-            "updateState directly.");
-        return;
+      currentInfo.setState(newState);
+
+      // We are updating two places before this update is done, these can
+      // fail independently, since the code needs to handle it.
+
+      // We update the attribute map, if that fails it will throw an
+      // exception, so no issues, if we are successful, we keep track of the
+      // fact that we have updated the lifecycle state in the map, and update
+      // the container state. If this second update fails, we will attempt to
+      // roll back the earlier change we did. If the rollback fails, we can
+      // be in an inconsistent state,
+
+      lifeCycleStateMap.update(currentState, newState, containerID);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Updated the container {} to new state. Old = {}, new = " +
+            "{}", containerID, currentState, newState);
       }
-      final ContainerInfo currentInfo = containerMap.get(containerID);
-      try {
-        currentInfo.setState(newState);
 
-        // We are updating two places before this update is done, these can
-        // fail independently, since the code needs to handle it.
+      // Just flush both old and new data sets from the result cache.
+      flushCache(currentInfo);
+    } catch (SCMException ex) {
+      LOG.error("Unable to update the container state.", ex);
+      // we need to revert the change in this attribute since we are not
+      // able to update the hash table.
+      LOG.info("Reverting the update to lifecycle state. Moving back to " +
+              "old state. Old = {}, Attempted state = {}", currentState,
+          newState);
 
-        // We update the attribute map, if that fails it will throw an
-        // exception, so no issues, if we are successful, we keep track of the
-        // fact that we have updated the lifecycle state in the map, and update
-        // the container state. If this second update fails, we will attempt to
-        // roll back the earlier change we did. If the rollback fails, we can
-        // be in an inconsistent state,
+      currentInfo.setState(currentState);
 
-        lifeCycleStateMap.update(currentState, newState, containerID);
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Updated the container {} to new state. Old = {}, new = " +
-              "{}", containerID, currentState, newState);
-        }
+      // if this line throws, the state map can be in an inconsistent
+      // state, since we will have modified the attribute by the
+      // container state will not in sync since we were not able to put
+      // that into the hash table.
+      lifeCycleStateMap.update(newState, currentState, containerID);
 
-        // Just flush both old and new data sets from the result cache.
-        flushCache(currentInfo);
-      } catch (SCMException ex) {
-        LOG.error("Unable to update the container state.", ex);
-        // we need to revert the change in this attribute since we are not
-        // able to update the hash table.
-        LOG.info("Reverting the update to lifecycle state. Moving back to " +
-                "old state. Old = {}, Attempted state = {}", currentState,
-            newState);
-
-        currentInfo.setState(currentState);
-
-        // if this line throws, the state map can be in an inconsistent
-        // state, since we will have modified the attribute by the
-        // container state will not in sync since we were not able to put
-        // that into the hash table.
-        lifeCycleStateMap.update(newState, currentState, containerID);
-
-        throw new SCMException("Updating the container map failed.", ex,
-            FAILED_TO_CHANGE_CONTAINER_STATE);
-      }
-    } finally {
-      lock.writeLock().unlock();
+      throw new SCMException("Updating the container map failed.", ex,
+          FAILED_TO_CHANGE_CONTAINER_STATE);
     }
   }
 
@@ -357,12 +302,7 @@
    */
   NavigableSet<ContainerID> getContainerIDsByOwner(final String ownerName) {
     Preconditions.checkNotNull(ownerName);
-    lock.readLock().lock();
-    try {
-      return ownerMap.getCollection(ownerName);
-    } finally {
-      lock.readLock().unlock();
-    }
+    return ownerMap.getCollection(ownerName);
   }
 
   /**
@@ -373,12 +313,7 @@
    */
   NavigableSet<ContainerID> getContainerIDsByType(final ReplicationType type) {
     Preconditions.checkNotNull(type);
-    lock.readLock().lock();
-    try {
-      return typeMap.getCollection(type);
-    } finally {
-      lock.readLock().unlock();
-    }
+    return typeMap.getCollection(type);
   }
 
   /**
@@ -390,12 +325,7 @@
   NavigableSet<ContainerID> getContainerIDsByFactor(
       final ReplicationFactor factor) {
     Preconditions.checkNotNull(factor);
-    lock.readLock().lock();
-    try {
-      return factorMap.getCollection(factor);
-    } finally {
-      lock.readLock().unlock();
-    }
+    return factorMap.getCollection(factor);
   }
 
   /**
@@ -407,12 +337,7 @@
   public NavigableSet<ContainerID> getContainerIDsByState(
       final LifeCycleState state) {
     Preconditions.checkNotNull(state);
-    lock.readLock().lock();
-    try {
-      return lifeCycleStateMap.getCollection(state);
-    } finally {
-      lock.readLock().unlock();
-    }
+    return lifeCycleStateMap.getCollection(state);
   }
 
   /**
@@ -433,57 +358,52 @@
     Preconditions.checkNotNull(factor, "Factor cannot be null");
     Preconditions.checkNotNull(type, "Type cannot be null");
 
-    lock.readLock().lock();
-    try {
-      final ContainerQueryKey queryKey =
-          new ContainerQueryKey(state, owner, factor, type);
-      if(resultCache.containsKey(queryKey)){
-        return resultCache.get(queryKey);
-      }
-
-      // If we cannot meet any one condition we return EMPTY_SET immediately.
-      // Since when we intersect these sets, the result will be empty if any
-      // one is empty.
-      final NavigableSet<ContainerID> stateSet =
-          lifeCycleStateMap.getCollection(state);
-      if (stateSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-      final NavigableSet<ContainerID> ownerSet =
-          ownerMap.getCollection(owner);
-      if (ownerSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-      final NavigableSet<ContainerID> factorSet =
-          factorMap.getCollection(factor);
-      if (factorSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-      final NavigableSet<ContainerID> typeSet =
-          typeMap.getCollection(type);
-      if (typeSet.size() == 0) {
-        return EMPTY_SET;
-      }
-
-
-      // if we add more constraints we will just add those sets here..
-      final NavigableSet<ContainerID>[] sets = sortBySize(stateSet,
-          ownerSet, factorSet, typeSet);
-
-      NavigableSet<ContainerID> currentSet = sets[0];
-      // We take the smallest set and intersect against the larger sets. This
-      // allows us to reduce the lookups to the least possible number.
-      for (int x = 1; x < sets.length; x++) {
-        currentSet = intersectSets(currentSet, sets[x]);
-      }
-      resultCache.put(queryKey, currentSet);
-      return currentSet;
-    } finally {
-      lock.readLock().unlock();
+    final ContainerQueryKey queryKey =
+        new ContainerQueryKey(state, owner, factor, type);
+    if(resultCache.containsKey(queryKey)){
+      return resultCache.get(queryKey);
     }
+
+    // If we cannot meet any one condition we return EMPTY_SET immediately.
+    // Since when we intersect these sets, the result will be empty if any
+    // one is empty.
+    final NavigableSet<ContainerID> stateSet =
+        lifeCycleStateMap.getCollection(state);
+    if (stateSet.size() == 0) {
+      return EMPTY_SET;
+    }
+
+    final NavigableSet<ContainerID> ownerSet =
+        ownerMap.getCollection(owner);
+    if (ownerSet.size() == 0) {
+      return EMPTY_SET;
+    }
+
+    final NavigableSet<ContainerID> factorSet =
+        factorMap.getCollection(factor);
+    if (factorSet.size() == 0) {
+      return EMPTY_SET;
+    }
+
+    final NavigableSet<ContainerID> typeSet =
+        typeMap.getCollection(type);
+    if (typeSet.size() == 0) {
+      return EMPTY_SET;
+    }
+
+
+    // if we add more constraints we will just add those sets here..
+    final NavigableSet<ContainerID>[] sets = sortBySize(stateSet,
+        ownerSet, factorSet, typeSet);
+
+    NavigableSet<ContainerID> currentSet = sets[0];
+    // We take the smallest set and intersect against the larger sets. This
+    // allows us to reduce the lookups to the least possible number.
+    for (int x = 1; x < sets.length; x++) {
+      currentSet = intersectSets(currentSet, sets[x]);
+    }
+    resultCache.put(queryKey, currentSet);
+    return currentSet;
   }
 
   /**
@@ -541,12 +461,4 @@
     }
   }
 
-  private void checkIfContainerExist(ContainerID containerID)
-      throws ContainerNotFoundException {
-    if (!containerMap.containsKey(containerID)) {
-      throw new ContainerNotFoundException("Container with id " +
-          containerID.getId() + " not found.");
-    }
-  }
-
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index 6f6cc54..d01257b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -194,9 +194,6 @@
   public static final TypedEvent<SafeModeStatus> SAFE_MODE_STATUS =
       new TypedEvent<>(SafeModeStatus.class, "Safe mode status");
 
-  public static final TypedEvent<SafeModeStatus> DELAYED_SAFE_MODE_STATUS =
-      new TypedEvent<>(SafeModeStatus.class, "Delayed safe mode status");
-
   /**
    * Private Ctor. Never Constructed.
    */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedConsumer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedConsumer.java
new file mode 100644
index 0000000..f6a4e3e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedConsumer.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+
+/**
+ * Represents an operation that accepts a single input argument and returns no
+ * result.
+ *
+ * @param <T> the type of the input to the operation
+ *
+ * @since 1.8
+ */
+@FunctionalInterface
+public interface CheckedConsumer<T, E extends Throwable> {
+
+  /**
+   * Performs the operation on the given argument.
+   *
+   * @param t the input argument
+   * @throws E in case the exception during execution
+   */
+  void execute(T t) throws E;
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedFunction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedFunction.java
new file mode 100644
index 0000000..da8a9a4
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/CheckedFunction.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+/**
+ * Represents a function that accepts no argument and returns no result.
+ */
+@FunctionalInterface
+public interface CheckedFunction<E extends Throwable> {
+
+  /**
+   * Executes the given logic.
+   *
+   * @throws E in case the exception during execution
+   */
+  void execute() throws E;
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ExecutionUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ExecutionUtil.java
new file mode 100644
index 0000000..24fbfb8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ExecutionUtil.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This Utility class is to rethrow the original exception after
+ * executing the clean-up code.
+ *
+ * @param <E> Exception to throw on failure
+ */
+public final class ExecutionUtil<E extends Throwable> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ExecutionUtil.class);
+
+  private final CheckedFunction<E> fn;
+
+  private CheckedFunction<E>  onException;
+
+  private volatile boolean completed;
+
+  private ExecutionUtil(final CheckedFunction<E> fn) {
+    this.fn = fn;
+    this.completed = false;
+  }
+
+  public static<E extends Exception> ExecutionUtil<E> create(
+      CheckedFunction<E> tryBlock) {
+    return new ExecutionUtil<>(tryBlock);
+  }
+
+  public ExecutionUtil<E> onException(CheckedFunction<E>  catchBlock) {
+    onException = catchBlock;
+    return this;
+  }
+
+  public void execute() throws E {
+    if (!completed) {
+      completed = true;
+      try {
+        fn.execute();
+      } catch (Exception ex) {
+        try {
+          onException.execute();
+        } catch (Exception error) {
+          LOG.warn("Got error while doing clean-up.", error);
+        }
+        throw (E) ex;
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
new file mode 100644
index 0000000..9b917db
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
@@ -0,0 +1,284 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
+import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.server.SCMCertStore;
+import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultCAServer;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultCAProfile;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse;
+import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient;
+import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
+import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.bouncycastle.cert.X509CertificateHolder;
+import org.bouncycastle.pkcs.PKCS10CertificationRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.security.KeyPair;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.concurrent.ExecutionException;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.SCM;
+import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType.KERBEROS_TRUSTED;
+import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ROOT_CA_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_SUB_CA_PREFIX;
+
+public final class HASecurityUtils {
+
+  private HASecurityUtils() {
+  }
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(HASecurityUtils.class);
+
+  /**
+   * Initialize Security which generates public, private key pair and get SCM
+   * signed certificate and persist to local disk.
+   * @param scmStorageConfig
+   * @param fetchedScmId
+   * @param conf
+   * @param scmAddress
+   * @throws IOException
+   */
+  public static void initializeSecurity(SCMStorageConfig scmStorageConfig,
+      String fetchedScmId, OzoneConfiguration conf,
+      InetSocketAddress scmAddress, boolean primaryscm)
+      throws IOException {
+    LOG.info("Initializing secure StorageContainerManager.");
+
+    CertificateClient certClient =
+        new SCMCertificateClient(new SecurityConfig(conf));
+    InitResponse response = certClient.init();
+    LOG.info("Init response: {}", response);
+    switch (response) {
+    case SUCCESS:
+      LOG.info("Initialization successful.");
+      break;
+    case GETCERT:
+      if (!primaryscm) {
+        getRootCASignedSCMCert(certClient, conf, fetchedScmId, scmStorageConfig,
+            scmAddress);
+      } else {
+        getPrimarySCMSelfSignedCert(certClient, conf, fetchedScmId,
+            scmStorageConfig, scmAddress);
+      }
+      LOG.info("Successfully stored SCM signed certificate.");
+      break;
+    case FAILURE:
+      LOG.error("SCM security initialization failed.");
+      throw new RuntimeException("OM security initialization failed.");
+    case RECOVER:
+      LOG.error("SCM security initialization failed. SCM certificate is " +
+          "missing.");
+      throw new RuntimeException("SCM security initialization failed.");
+    default:
+      LOG.error("SCM security initialization failed. Init response: {}",
+          response);
+      throw new RuntimeException("SCM security initialization failed.");
+    }
+  }
+
+  /**
+   * For bootstrapped SCM get sub-ca signed certificate and root CA
+   * certificate using scm security client and store it using certificate
+   * client.
+   */
+  private static void getRootCASignedSCMCert(CertificateClient client,
+      OzoneConfiguration config, String fetchedSCMId,
+      SCMStorageConfig scmStorageConfig, InetSocketAddress scmAddress) {
+    try {
+      // Generate CSR.
+      PKCS10CertificationRequest csr = generateCSR(client, scmStorageConfig,
+          config, scmAddress, fetchedSCMId);
+
+      ScmNodeDetailsProto scmNodeDetailsProto =
+          ScmNodeDetailsProto.newBuilder()
+              .setClusterId(scmStorageConfig.getClusterID())
+              .setHostName(scmAddress.getHostName())
+              .setScmNodeId(fetchedSCMId).build();
+
+      // Create SCM security client.
+      SCMSecurityProtocolClientSideTranslatorPB secureScmClient =
+          HddsServerUtil.getScmSecurityClient(config);
+
+      // Get SCM sub CA cert.
+      SCMGetCertResponseProto response = secureScmClient.
+          getSCMCertChain(scmNodeDetailsProto, getEncodedString(csr));
+      String pemEncodedCert = response.getX509Certificate();
+
+      // Store SCM sub CA and root CA certificate.
+      if (response.hasX509CACertificate()) {
+        String pemEncodedRootCert = response.getX509CACertificate();
+        client.storeCertificate(pemEncodedRootCert, true, true);
+        client.storeCertificate(pemEncodedCert, true);
+
+        X509Certificate certificate =
+            CertificateCodec.getX509Certificate(pemEncodedCert);
+
+        persistSubCACertificate(config, client,
+            CertificateCodec.getCertificateHolder(certificate));
+
+        // Persist scm cert serial ID.
+        scmStorageConfig.setScmCertSerialId(certificate.getSerialNumber()
+            .toString());
+      } else {
+        throw new RuntimeException("Unable to retrieve SCM certificate chain");
+      }
+    } catch (IOException | CertificateException e) {
+      LOG.error("Error while fetching/storing SCM signed certificate.", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+
+  /**
+   * For primary SCM get sub-ca signed certificate and root CA certificate by
+   * root CA certificate server and store it using certificate client.
+   */
+  private static void getPrimarySCMSelfSignedCert(CertificateClient client,
+      OzoneConfiguration config, String fetchedSCMId,
+      SCMStorageConfig scmStorageConfig, InetSocketAddress scmAddress) {
+
+    try {
+
+      CertificateServer rootCAServer =
+          initializeRootCertificateServer(config, null, scmStorageConfig);
+
+      PKCS10CertificationRequest csr = generateCSR(client, scmStorageConfig,
+          config, scmAddress, fetchedSCMId);
+
+      X509CertificateHolder subSCMCertHolder = rootCAServer.
+          requestCertificate(csr, KERBEROS_TRUSTED, SCM).get();
+
+      X509CertificateHolder rootCACertificateHolder =
+          rootCAServer.getCACertificate();
+
+      String pemEncodedCert =
+          CertificateCodec.getPEMEncodedString(subSCMCertHolder);
+
+      String pemEncodedRootCert =
+          CertificateCodec.getPEMEncodedString(rootCACertificateHolder);
+
+
+      client.storeCertificate(pemEncodedRootCert, true, true);
+      client.storeCertificate(pemEncodedCert, true);
+
+
+      persistSubCACertificate(config, client, subSCMCertHolder);
+
+      // Persist scm cert serial ID.
+      scmStorageConfig.setScmCertSerialId(subSCMCertHolder.getSerialNumber()
+          .toString());
+    } catch (InterruptedException | ExecutionException| IOException |
+        CertificateException  e) {
+      LOG.error("Error while fetching/storing SCM signed certificate.", e);
+      throw new RuntimeException(e);
+    }
+
+  }
+
+  /**
+   * This function creates/initializes a certificate server as needed.
+   * This function is idempotent, so calling this again and again after the
+   * server is initialized is not a problem.
+   *
+   * @param config
+   * @param scmCertStore
+   * @param scmStorageConfig
+   */
+  public static CertificateServer initializeRootCertificateServer(
+      OzoneConfiguration config, SCMCertStore scmCertStore,
+      SCMStorageConfig scmStorageConfig)
+      throws IOException {
+    String subject = SCM_ROOT_CA_PREFIX +
+        InetAddress.getLocalHost().getHostName();
+
+    DefaultCAServer rootCAServer = new DefaultCAServer(subject,
+        scmStorageConfig.getClusterID(),
+        scmStorageConfig.getScmId(), scmCertStore, new DefaultCAProfile(),
+        SCM_ROOT_CA_COMPONENT_NAME);
+
+    rootCAServer.init(new SecurityConfig(config),
+        CertificateServer.CAType.SELF_SIGNED_CA);
+
+    return rootCAServer;
+  }
+
+  /**
+   * Generate CSR to obtain SCM sub CA certificate.
+   */
+  private static PKCS10CertificationRequest generateCSR(
+      CertificateClient client, SCMStorageConfig scmStorageConfig,
+      OzoneConfiguration config, InetSocketAddress scmAddress,
+      String fetchedSCMId) throws IOException {
+    CertificateSignRequest.Builder builder = client.getCSRBuilder();
+    KeyPair keyPair = new KeyPair(client.getPublicKey(),
+        client.getPrivateKey());
+
+    // Get host name.
+    String hostname = scmAddress.getAddress().getHostName();
+
+    String subject = SCM_SUB_CA_PREFIX + hostname;
+
+    builder.setKey(keyPair)
+        .setConfiguration(config)
+        .setScmID(fetchedSCMId)
+        .setClusterID(scmStorageConfig.getClusterID())
+        .setSubject(subject);
+
+
+    LOG.info("Creating csr for SCM->hostName:{},scmId:{},clusterId:{}," +
+            "subject:{}", hostname, fetchedSCMId,
+        scmStorageConfig.getClusterID(), subject);
+
+    return builder.build();
+  }
+
+  /**
+   * Persists the sub SCM signed certificate to the location which can be
+   * read by sub CA Certificate server.
+   * @param config
+   * @param certificateClient
+   * @param certificateHolder
+   * @throws IOException
+   */
+  private static void persistSubCACertificate(OzoneConfiguration config,
+      CertificateClient certificateClient,
+      X509CertificateHolder certificateHolder) throws IOException {
+    CertificateCodec certCodec =
+        new CertificateCodec(new SecurityConfig(config),
+            certificateClient.getComponentName());
+
+    certCodec.writeCertificate(certificateHolder);
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
new file mode 100644
index 0000000..2c4b98f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos;
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointResponseProto;
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolServiceGrpc;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
+import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder;
+import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Grpc client to download a Rocks db checkpoint from leader node
+ * in SCM HA ring.
+ */
+public class InterSCMGrpcClient implements SCMSnapshotDownloader{
+  private static final Logger LOG =
+      LoggerFactory.getLogger(InterSCMGrpcClient.class);
+
+  private final ManagedChannel channel;
+
+  private final InterSCMProtocolServiceGrpc.InterSCMProtocolServiceStub
+      client;
+
+  private final long timeout;
+
+  public InterSCMGrpcClient(final String host, final ConfigurationSource conf) {
+    Preconditions.checkNotNull(conf);
+    int port = conf.getInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY,
+        ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT);
+    timeout =
+        conf.getObject(SCMHAConfiguration.class).getGrpcDeadlineInterval();
+    NettyChannelBuilder channelBuilder =
+        NettyChannelBuilder.forAddress(host, port).usePlaintext()
+            .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
+    channel = channelBuilder.build();
+    client = InterSCMProtocolServiceGrpc.newStub(channel).
+        withDeadlineAfter(timeout, TimeUnit.SECONDS);
+  }
+
+
+  @Override
+  public CompletableFuture<Path> download(final Path outputPath) {
+    // By default on every checkpoint, the rocks db will be flushed
+    InterSCMProtocolProtos.CopyDBCheckpointRequestProto request =
+        InterSCMProtocolProtos.CopyDBCheckpointRequestProto.newBuilder()
+            .setFlush(true)
+            .build();
+    CompletableFuture<Path> response = new CompletableFuture<>();
+
+
+    client.download(request,
+        new StreamDownloader(response, outputPath));
+
+    return response;
+  }
+
+  public void shutdown() {
+    channel.shutdown();
+    try {
+      channel.awaitTermination(5, TimeUnit.SECONDS);
+    } catch (Exception e) {
+      LOG.error("failed to shutdown replication channel", e);
+    }
+  }
+
+  @Override
+  public void close() throws Exception {
+    shutdown();
+  }
+
+  /**
+   * gRPC stream observer to CompletableFuture adapter.
+   */
+  public static class StreamDownloader
+      implements StreamObserver<CopyDBCheckpointResponseProto> {
+
+    private final CompletableFuture<Path> response;
+    private final OutputStream stream;
+    private final Path outputPath;
+
+    public StreamDownloader(CompletableFuture<Path> response,
+        Path outputPath) {
+      this.response = response;
+      this.outputPath = outputPath;
+      try {
+        Preconditions.checkNotNull(outputPath, "Output path cannot be null");
+        stream = new FileOutputStream(outputPath.toFile());
+      } catch (IOException e) {
+        throw new UncheckedIOException(
+            "Output path can't be used: " + outputPath, e);
+      }
+    }
+
+    @Override
+    public void onNext(CopyDBCheckpointResponseProto checkPoint) {
+      try {
+        checkPoint.getData().writeTo(stream);
+      } catch (IOException e) {
+        onError(e);
+      }
+    }
+
+    @Override
+    public void onError(Throwable throwable) {
+      try {
+        LOG.error("Download of checkpoint {} was unsuccessful",
+            outputPath, throwable);
+        stream.close();
+        deleteOutputOnFailure();
+        response.completeExceptionally(throwable);
+      } catch (IOException e) {
+        LOG.error("Failed to close {}}",
+            outputPath, e);
+        response.completeExceptionally(e);
+      }
+    }
+
+    @Override
+    public void onCompleted() {
+      try {
+        stream.close();
+        LOG.info("Checkpoint is downloaded to {}", outputPath);
+        response.complete(outputPath);
+      } catch (IOException e) {
+        LOG.error("Downloaded checkpoint OK, but failed to close {}",
+            outputPath, e);
+        response.completeExceptionally(e);
+      }
+
+    }
+
+    private void deleteOutputOnFailure() {
+      try {
+        Files.delete(outputPath);
+      } catch (IOException ex) {
+        LOG.error("Failed to delete destination {} for " +
+                "unsuccessful download",
+            outputPath, ex);
+      }
+    }
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
new file mode 100644
index 0000000..d92220a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcProtocolService.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.ratis.thirdparty.io.grpc.Server;
+import org.apache.ratis.thirdparty.io.grpc.ServerBuilder;
+import org.apache.ratis.thirdparty.io.grpc.netty.NettyServerBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Service to serve SCM DB checkpoints available for SCM HA.
+ * Ideally should only be run on a ratis leader.
+ */
+public class InterSCMGrpcProtocolService {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(InterSCMGrpcService.class);
+
+  private final int port;
+  private Server server;
+  private final AtomicBoolean isStarted = new AtomicBoolean(false);
+
+  public InterSCMGrpcProtocolService(final ConfigurationSource conf,
+      final StorageContainerManager scm) {
+    Preconditions.checkNotNull(conf);
+    this.port = conf.getInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY,
+        ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT);
+
+    NettyServerBuilder nettyServerBuilder =
+        ((NettyServerBuilder) ServerBuilder.forPort(port))
+            .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
+
+    InterSCMGrpcService service = new InterSCMGrpcService(scm);
+    ServerBuilder b = nettyServerBuilder.addService(service);
+    Preconditions.checkNotNull(b);
+    server = nettyServerBuilder.build();
+  }
+
+  public int getPort() {
+    return this.port;
+  }
+
+  public void start() throws IOException {
+    if (!isStarted.compareAndSet(false, true)) {
+      LOG.info("Ignore. already started.");
+      return;
+    } else {
+      server.start();
+    }
+  }
+
+  public void stop() {
+    if (isStarted.get()) {
+      server.shutdown();
+      try {
+        server.awaitTermination(5, TimeUnit.SECONDS);
+      } catch (Exception e) {
+        LOG.error("failed to shutdown XceiverServerGrpc", e);
+      }
+      isStarted.set(false);
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java
new file mode 100644
index 0000000..e962ded
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcService.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointRequestProto;
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos.CopyDBCheckpointResponseProto;
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolServiceGrpc;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
+
+/**
+ * Service to handle Rocks db Checkpointing.
+ */
+public class InterSCMGrpcService extends
+    InterSCMProtocolServiceGrpc.InterSCMProtocolServiceImplBase {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(InterSCMGrpcService.class);
+
+  private static final int BUFFER_SIZE = 1024 * 1024;
+
+  private final SCMDBCheckpointProvider provider;
+
+  private final StorageContainerManager scm;
+
+  public InterSCMGrpcService(final StorageContainerManager scm) {
+    Preconditions.checkNotNull(scm);
+    this.scm = scm;
+    provider =
+        new SCMDBCheckpointProvider(scm.getScmMetadataStore().getStore());
+  }
+
+  @Override
+  public void download(CopyDBCheckpointRequestProto request,
+      StreamObserver<CopyDBCheckpointResponseProto> responseObserver) {
+    try {
+      scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
+      Table<String, TransactionInfo> transactionInfoTable =
+          Arrays.stream(new SCMDBDefinition().getColumnFamilies())
+              .filter(t -> t.getValueType() == TransactionInfo.class)
+              .findFirst().get().getTable(scm.getScmMetadataStore().getStore());
+
+      TransactionInfo transactionInfo =
+          transactionInfoTable.get(TRANSACTION_INFO_KEY);
+      Preconditions.checkNotNull(transactionInfo);
+      SCMGrpcOutputStream outputStream =
+          new SCMGrpcOutputStream(responseObserver, scm.getClusterId(),
+              BUFFER_SIZE);
+      provider.writeDBCheckPointToSream(outputStream, request.getFlush());
+
+    } catch (IOException e) {
+      LOG.error("Error streaming SCM DB checkpoint", e);
+      responseObserver.onError(e);
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHADBTransactionBuffer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHADBTransactionBuffer.java
new file mode 100644
index 0000000..174a4f8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHADBTransactionBuffer.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.ratis.statemachine.SnapshotInfo;
+
+import java.io.IOException;
+
+public class MockSCMHADBTransactionBuffer implements SCMHADBTransactionBuffer {
+  private DBStore dbStore;
+  private BatchOperation currentBatchOperation;
+
+  public MockSCMHADBTransactionBuffer() {
+  }
+
+  public MockSCMHADBTransactionBuffer(DBStore store) {
+    this.dbStore = store;
+  }
+
+  private BatchOperation getCurrentBatchOperation() {
+    if (currentBatchOperation == null) {
+      if (dbStore != null) {
+        currentBatchOperation = dbStore.initBatchOperation();
+      } else {
+        currentBatchOperation = new RDBBatchOperation();
+      }
+    }
+    return currentBatchOperation;
+  }
+
+  @Override
+  public <KEY, VALUE> void addToBuffer(
+      Table<KEY, VALUE> table, KEY key, VALUE value) throws IOException {
+    table.putWithBatch(getCurrentBatchOperation(), key, value);
+  }
+
+  @Override
+  public <KEY, VALUE> void removeFromBuffer(Table<KEY, VALUE> table, KEY key)
+      throws IOException {
+    table.deleteWithBatch(getCurrentBatchOperation(), key);
+  }
+
+  @Override
+  public void updateLatestTrxInfo(TransactionInfo info) {
+
+  }
+
+  @Override
+  public TransactionInfo getLatestTrxInfo() {
+    return null;
+  }
+
+  @Override
+  public SnapshotInfo getLatestSnapshot() {
+    return null;
+  }
+
+  @Override
+  public void setLatestSnapshot(SnapshotInfo latestSnapshot) {
+
+  }
+
+  @Override
+  public void flush() throws IOException {
+    if (dbStore != null) {
+      dbStore.commitBatchOperation(getCurrentBatchOperation());
+      currentBatchOperation.close();
+      currentBatchOperation = null;
+    }
+  }
+
+  @Override
+  public void init() throws IOException {
+
+  }
+
+  @Override
+  public void close() throws IOException {
+    flush();
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
new file mode 100644
index 0000000..91c4307
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftGroupMemberId;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.apache.ratis.server.RaftServer;
+import org.apache.ratis.protocol.exceptions.StateMachineException;
+import org.apache.ratis.server.protocol.TermIndex;
+
+// TODO: Move this class to test package after fixing Recon
+/**
+ * Mock SCMHAManager implementation for testing.
+ */
+public final class MockSCMHAManager implements SCMHAManager {
+
+  private final SCMRatisServer ratisServer;
+  private boolean isLeader;
+  private DBTransactionBuffer transactionBuffer;
+
+  public static SCMHAManager getInstance(boolean isLeader) {
+    return new MockSCMHAManager(isLeader);
+  }
+
+  public static SCMHAManager getInstance(boolean isLeader,
+      DBTransactionBuffer buffer) {
+    return new MockSCMHAManager(isLeader, buffer);
+  }
+
+  /**
+   * Creates MockSCMHAManager instance.
+   */
+  private MockSCMHAManager(boolean isLeader) {
+    this(isLeader, new MockSCMHADBTransactionBuffer());
+  }
+
+  private MockSCMHAManager(boolean isLeader, DBTransactionBuffer buffer) {
+    this.ratisServer = new MockRatisServer();
+    this.isLeader = isLeader;
+    this.transactionBuffer = buffer;
+  }
+
+  @Override
+  public void start() throws IOException {
+    ratisServer.start();
+  }
+
+  /**
+   * Informs MockRatisServe to behaviour as a leader SCM or a follower SCM.
+   */
+  boolean isLeader() {
+    return isLeader;
+  }
+
+  public void setIsLeader(boolean isLeader) {
+    this.isLeader = isLeader;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public SCMRatisServer getRatisServer() {
+    return ratisServer;
+  }
+
+  @Override
+  public DBTransactionBuffer getDBTransactionBuffer() {
+    return transactionBuffer;
+  }
+
+  @Override
+  public SCMHADBTransactionBuffer asSCMHADBTransactionBuffer() {
+    return null;
+  }
+
+  @Override
+  public SCMSnapshotProvider getSCMSnapshotProvider() {
+    return null;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void shutdown() throws IOException {
+    ratisServer.stop();
+  }
+
+  @Override
+  public boolean addSCM(AddSCMRequest request) throws IOException {
+    return false;
+  }
+
+  @Override
+  public TermIndex installSnapshotFromLeader(String leaderId) {
+    return null;
+  }
+
+  private class MockRatisServer implements SCMRatisServer {
+
+    private Map<RequestType, Object> handlers =
+        new EnumMap<>(RequestType.class);
+
+    @Override
+    public void start() {
+    }
+
+    @Override
+    public void registerStateMachineHandler(final RequestType handlerType,
+        final Object handler) {
+      handlers.put(handlerType, handler);
+    }
+
+    @Override
+    public SCMRatisResponse submitRequest(final SCMRatisRequest request)
+        throws IOException {
+      final RaftGroupMemberId raftId = RaftGroupMemberId.valueOf(
+          RaftPeerId.valueOf("peer"), RaftGroupId.randomId());
+      RaftClientReply reply;
+      if (isLeader()) {
+        try {
+          final Message result = process(request);
+          reply = RaftClientReply.newBuilder().setClientId(ClientId.randomId())
+              .setServerId(raftId).setGroupId(RaftGroupId.emptyGroupId())
+              .setCallId(1L).setSuccess(true).setMessage(result)
+              .setException(null).setLogIndex(1L).build();
+        } catch (Exception ex) {
+          reply = RaftClientReply.newBuilder().setClientId(ClientId.randomId())
+              .setServerId(raftId).setGroupId(RaftGroupId.emptyGroupId())
+              .setCallId(1L).setSuccess(false).setMessage(Message.EMPTY)
+              .setException(new StateMachineException(raftId, ex))
+              .setLogIndex(1L).build();
+        }
+      } else {
+        reply = RaftClientReply.newBuilder().setClientId(ClientId.randomId())
+            .setServerId(raftId).setGroupId(RaftGroupId.emptyGroupId())
+            .setCallId(1L).setSuccess(false).setMessage(Message.EMPTY)
+            .setException(triggerNotLeaderException()).setLogIndex(1L).build();
+      }
+      return SCMRatisResponse.decode(reply);
+    }
+
+    private Message process(final SCMRatisRequest request) throws Exception {
+      try {
+        final Object handler = handlers.get(request.getType());
+
+        if (handler == null) {
+          throw new IOException(
+              "No handler found for request type " + request.getType());
+        }
+
+        final List<Class<?>> argumentTypes = new ArrayList<>();
+        for (Object args : request.getArguments()) {
+          argumentTypes.add(args.getClass());
+        }
+        final Object result = handler.getClass()
+            .getMethod(request.getOperation(),
+                argumentTypes.toArray(new Class<?>[0]))
+            .invoke(handler, request.getArguments());
+
+        return SCMRatisResponse.encode(result);
+      } catch (NoSuchMethodException | SecurityException ex) {
+        throw new InvalidProtocolBufferException(ex.getMessage());
+      } catch (InvocationTargetException e) {
+        final Exception targetEx = (Exception) e.getTargetException();
+        throw targetEx != null ? targetEx : e;
+      }
+    }
+
+    @Override
+    public void stop() {
+    }
+
+    @Override
+    public RaftServer.Division getDivision() {
+      return null;
+    }
+
+    @Override
+    public List<String> getRatisRoles() {
+      return Arrays
+          .asList("180.3.14.5:9865", "180.3.14.21:9865", "180.3.14.145:9865");
+    }
+
+    @Override
+    public NotLeaderException triggerNotLeaderException() {
+      return new NotLeaderException(RaftGroupMemberId
+          .valueOf(RaftPeerId.valueOf("peer"), RaftGroupId.randomId()), null,
+          new ArrayList<>());
+    }
+
+    @Override
+    public SCMStateMachine getSCMStateMachine() {
+      return null;
+    }
+
+    @Override
+    public boolean addSCM(AddSCMRequest request) throws IOException {
+      return false;
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
new file mode 100644
index 0000000..75db7bf
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.ratis.RaftConfigKeys;
+import org.apache.ratis.conf.RaftProperties;
+import org.apache.ratis.grpc.GrpcConfigKeys;
+import org.apache.ratis.rpc.RpcType;
+import org.apache.ratis.server.RaftServerConfigKeys;
+import org.apache.ratis.util.SizeInBytes;
+import org.apache.ratis.util.TimeDuration;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.ratis.server.RaftServerConfigKeys.Log;
+import static org.apache.ratis.server.RaftServerConfigKeys.RetryCache;
+import static org.apache.ratis.server.RaftServerConfigKeys.Rpc;
+import static org.apache.ratis.server.RaftServerConfigKeys.Snapshot;
+
+/**
+ * Ratis Util for SCM HA.
+ */
+public final class RatisUtil {
+
+  private RatisUtil() {
+  }
+
+
+  /**
+   * Constructs new Raft Properties instance using {@link SCMHAConfiguration}.
+   * @param haConf SCMHAConfiguration
+   * @param conf ConfigurationSource
+   */
+  public static RaftProperties newRaftProperties(
+      final SCMHAConfiguration haConf, final ConfigurationSource conf) {
+    //TODO: Remove ConfigurationSource!
+    // TODO: Check the default values.
+    final RaftProperties properties = new RaftProperties();
+    setRaftStorageDir(properties, haConf, conf);
+    setRaftRpcProperties(properties, haConf, conf);
+    setRaftLogProperties(properties, haConf);
+    setRaftRetryCacheProperties(properties, haConf);
+    setRaftSnapshotProperties(properties, haConf);
+    setRaftLeadElectionProperties(properties, haConf);
+    return properties;
+  }
+
+  /**
+   * Set the local directory where ratis logs will be stored.
+   *
+   * @param properties RaftProperties instance which will be updated
+   * @param haConf SCMHAConfiguration
+   * @param conf ConfigurationSource
+   */
+  public static void setRaftStorageDir(final RaftProperties properties,
+                                       final SCMHAConfiguration haConf,
+                                       final ConfigurationSource conf) {
+    RaftServerConfigKeys.setStorageDir(properties,
+        Collections.singletonList(new File(getRatisStorageDir(haConf, conf))));
+  }
+
+  public static String getRatisStorageDir(final SCMHAConfiguration haConf,
+      final ConfigurationSource conf) {
+    String storageDir = haConf.getRatisStorageDir();
+    if (Strings.isNullOrEmpty(storageDir)) {
+      File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf);
+      storageDir = (new File(metaDirPath, "scm-ha")).getPath();
+    }
+    return storageDir;
+  }
+  /**
+   * Set properties related to Raft RPC.
+   *
+   * @param properties RaftProperties instance which will be updated
+   * @param conf SCMHAConfiguration
+   */
+  private static void setRaftRpcProperties(final RaftProperties properties,
+      final SCMHAConfiguration conf, ConfigurationSource ozoneConf) {
+    RaftConfigKeys.Rpc.setType(properties,
+        RpcType.valueOf(conf.getRatisRpcType()));
+    GrpcConfigKeys.Server.setPort(properties, ozoneConf
+        .getInt(ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY,
+            ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT));
+    GrpcConfigKeys.setMessageSizeMax(properties,
+        SizeInBytes.valueOf("32m"));
+
+    Rpc.setRequestTimeout(properties, TimeDuration.valueOf(
+        conf.getRatisRequestTimeout(), TimeUnit.MILLISECONDS));
+    Rpc.setTimeoutMin(properties, TimeDuration.valueOf(
+        conf.getLeaderElectionMinTimeout(), TimeUnit.MILLISECONDS));
+    Rpc.setTimeoutMax(properties, TimeDuration.valueOf(
+        conf.getLeaderElectionMaxTimeout(), TimeUnit.MILLISECONDS));
+    Rpc.setSlownessTimeout(properties, TimeDuration.valueOf(
+        conf.getRatisNodeFailureTimeout(), TimeUnit.MILLISECONDS));
+  }
+
+  /**
+   * Set properties related to Raft leader election.
+   *
+   * @param properties RaftProperties instance which will be updated
+   * @param conf SCMHAConfiguration
+   */
+  private static void setRaftLeadElectionProperties(
+      final RaftProperties properties, final SCMHAConfiguration conf) {
+    // Disable the pre vote feature (related to leader election) in Ratis
+    RaftServerConfigKeys.LeaderElection.setPreVote(properties, false);
+  }
+
+  /**
+   * Set properties related to Raft Log.
+   *
+   * @param properties RaftProperties instance which will be updated
+   * @param conf SCMHAConfiguration
+   */
+  private static void setRaftLogProperties(final RaftProperties properties,
+                                           final SCMHAConfiguration conf) {
+    Log.setSegmentSizeMax(properties,
+        SizeInBytes.valueOf(conf.getRaftSegmentSize()));
+    Log.Appender.setBufferElementLimit(properties,
+        conf.getRaftLogAppenderQueueByteLimit());
+    Log.Appender.setBufferByteLimit(properties,
+        SizeInBytes.valueOf(conf.getRaftLogAppenderQueueByteLimit()));
+    Log.setPreallocatedSize(properties,
+        SizeInBytes.valueOf(conf.getRaftSegmentPreAllocatedSize()));
+    Log.Appender.setInstallSnapshotEnabled(properties, false);
+    Log.setPurgeUptoSnapshotIndex(properties, conf.getRaftLogPurgeEnabled());
+    Log.setPurgeGap(properties, conf.getRaftLogPurgeGap());
+    Log.setSegmentCacheNumMax(properties, 2);
+  }
+
+  /**
+   * Set properties related to Raft Retry Cache.
+   *
+   * @param properties RaftProperties instance which will be updated
+   * @param conf SCMHAConfiguration
+   */
+  private static void setRaftRetryCacheProperties(
+      final RaftProperties properties, final SCMHAConfiguration conf) {
+    RetryCache.setExpiryTime(properties, TimeDuration.valueOf(
+        conf.getRatisRetryCacheTimeout(), TimeUnit.MILLISECONDS));
+  }
+
+  /**
+   * Set properties related to Raft Snapshot.
+   *
+   * @param properties RaftProperties instance which will be updated
+   * @param conf SCMHAConfiguration
+   */
+  private static void setRaftSnapshotProperties(
+      final RaftProperties properties, final SCMHAConfiguration conf) {
+    Snapshot.setAutoTriggerEnabled(properties, true);
+    Snapshot.setAutoTriggerThreshold(properties,
+        conf.getRatisSnapshotThreshold());
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java
new file mode 100644
index 0000000..7c54723
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Reflection util for SCM HA.
+ */
+public final class ReflectionUtil {
+
+  private static Map<String, Class<?>> classCache = new HashMap<>();
+
+  private ReflectionUtil() {
+  }
+
+  /**
+   * Returns the {@code Class} object associated with the given string name.
+   *
+   * @param className the fully qualified name of the desired class.
+   * @return the {@code Class} object for the class with the
+   *         specified name.
+   * @throws ClassNotFoundException if the class cannot be located
+   */
+  public static Class<?> getClass(String className)
+      throws ClassNotFoundException {
+    if (!classCache.containsKey(className)) {
+      classCache.put(className, Class.forName(className));
+    }
+    return classCache.get(className);
+  }
+
+  /**
+   * Returns a {@code Method} object that reflects the specified public
+   * member method of the given {@code Class} object.
+   *
+   * @param clazz the class object which has the method
+   * @param methodName the name of the method
+   * @param arg the list of parameters
+   * @return the {@code Method} object that matches the specified
+   *         {@code name} and {@code parameterTypes}
+   * @throws NoSuchMethodException if a matching method is not found
+   *         or if the name is "&lt;init&gt;"or "&lt;clinit&gt;".
+   */
+  public static Method getMethod(
+      final Class<?> clazz, final String methodName, final Class<?>... arg)
+      throws NoSuchMethodException {
+    return clazz.getMethod(methodName, arg);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
new file mode 100644
index 0000000..2d4941f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * SCMContext is the single source of truth for some key information shared
+ * across all components within SCM, including:
+ * 1) RaftServer related info, e.g., isLeader, term.
+ * 2) SafeMode related info, e.g., inSafeMode, preCheckComplete.
+ *
+ * If current SCM is not running upon Ratis, the {@link SCMContext#isLeader}
+ * check will always return true, and {@link SCMContext#getTermOfLeader} will
+ * return INVALID_TERM.
+ */
+public final class SCMContext {
+  private static final Logger LOG = LoggerFactory.getLogger(SCMContext.class);
+
+  /**
+   * The initial value of term in raft is 0, and term increases monotonically.
+   * term equals INVALID_TERM indicates current SCM is running without Ratis.
+   */
+  public static final long INVALID_TERM = -1;
+
+  private static final SCMContext EMPTY_CONTEXT
+      = new SCMContext.Builder().build();
+
+  /**
+   * Used by non-HA mode SCM, Recon and Unit Tests.
+   */
+  public static SCMContext emptyContext() {
+    return EMPTY_CONTEXT;
+  }
+
+  /**
+   * Raft related info.
+   */
+  private boolean isLeader;
+  private long term;
+
+  /**
+   * Safe mode related info.
+   */
+  private SafeModeStatus safeModeStatus;
+
+  private final StorageContainerManager scm;
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  private SCMContext(boolean isLeader, long term,
+      final SafeModeStatus safeModeStatus, final StorageContainerManager scm) {
+    this.isLeader = isLeader;
+    this.term = term;
+    this.safeModeStatus = safeModeStatus;
+    this.scm = scm;
+  }
+
+  /**
+   * @param leader  : is leader or not
+   * @param newTerm : term if current SCM becomes leader
+   */
+  public void updateLeaderAndTerm(boolean leader, long newTerm) {
+    lock.writeLock().lock();
+    try {
+      LOG.info("update <isLeader,term> from <{},{}> to <{},{}>",
+          isLeader, term, leader, newTerm);
+
+      isLeader = leader;
+      term = newTerm;
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Check whether current SCM is leader or not.
+   *
+   * @return isLeader
+   */
+  public boolean isLeader() {
+    lock.readLock().lock();
+    try {
+      if (term == INVALID_TERM) {
+        return true;
+      }
+
+      return isLeader;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Get term of current leader SCM.
+   *
+   * @return term
+   * @throws NotLeaderException if isLeader is false
+   */
+  public long getTermOfLeader() throws NotLeaderException {
+    lock.readLock().lock();
+    try {
+      if (term == INVALID_TERM) {
+        return term;
+      }
+
+      if (!isLeader) {
+        LOG.warn("getTerm is invoked when not leader.");
+        throw scm.getScmHAManager()
+            .getRatisServer()
+            .triggerNotLeaderException();
+      }
+      return term;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * @param status : update SCMContext with latest SafeModeStatus.
+   */
+  public void updateSafeModeStatus(SafeModeStatus status) {
+    lock.writeLock().lock();
+    try {
+      LOG.info("Update SafeModeStatus from {} to {}.", safeModeStatus, status);
+      safeModeStatus = status;
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  public boolean isInSafeMode() {
+    lock.readLock().lock();
+    try {
+      return safeModeStatus.isInSafeMode();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  public boolean isPreCheckComplete() {
+    lock.readLock().lock();
+    try {
+      return safeModeStatus.isPreCheckComplete();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * @return StorageContainerManager
+   */
+  public StorageContainerManager getScm() {
+    Preconditions.checkNotNull(scm, "scm == null");
+    return scm;
+  }
+
+  public static class Builder {
+    /**
+     * The default context:
+     * running without Ratis, out of safe mode, and has completed preCheck.
+     */
+    private boolean isLeader = false;
+    private long term = INVALID_TERM;
+    private boolean isInSafeMode = false;
+    private boolean isPreCheckComplete = true;
+    private StorageContainerManager scm = null;
+
+    public Builder setLeader(boolean leader) {
+      this.isLeader = leader;
+      return this;
+    }
+
+    public Builder setTerm(long newTerm) {
+      this.term = newTerm;
+      return this;
+    }
+
+    public Builder setIsInSafeMode(boolean inSafeMode) {
+      this.isInSafeMode = inSafeMode;
+      return this;
+    }
+
+    public Builder setIsPreCheckComplete(boolean preCheckComplete) {
+      this.isPreCheckComplete = preCheckComplete;
+      return this;
+    }
+
+    public Builder setSCM(StorageContainerManager storageContainerManager) {
+      this.scm = storageContainerManager;
+      return this;
+    }
+
+    public SCMContext build() {
+      return new SCMContext(
+          isLeader,
+          term,
+          new SafeModeStatus(isInSafeMode, isPreCheckComplete),
+          scm);
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java
new file mode 100644
index 0000000..07e276f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMDBCheckpointProvider.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.time.Instant;
+
+
+// TODO: define a generic interface for this
+public class SCMDBCheckpointProvider {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMDBCheckpointProvider.class);
+  private transient DBStore scmDbStore;;
+
+  public SCMDBCheckpointProvider(DBStore scmDbStore) {
+    this.scmDbStore = scmDbStore;
+  }
+
+  public void writeDBCheckPointToSream(OutputStream stream, boolean flush)
+      throws IOException {
+    LOG.info("Received request to obtain SCM DB checkpoint snapshot");
+    if (scmDbStore == null) {
+      LOG.error("Unable to process checkpointing request. DB Store is null");
+      return;
+    }
+
+    DBCheckpoint checkpoint = null;
+    try {
+
+      checkpoint = scmDbStore.getCheckpoint(flush);
+      if (checkpoint == null || checkpoint.getCheckpointLocation() == null) {
+        throw new IOException("Unable to process metadata snapshot request. "
+            + "Checkpoint request returned null.");
+      }
+
+      Path file = checkpoint.getCheckpointLocation().getFileName();
+      if (file == null) {
+        return;
+      }
+
+      Instant start = Instant.now();
+      HddsServerUtil.writeDBCheckpointToStream(checkpoint, stream);
+      Instant end = Instant.now();
+
+      long duration = Duration.between(start, end).toMillis();
+      LOG.info("Time taken to write the checkpoint to response output " +
+          "stream: {} milliseconds", duration);
+
+    } catch (IOException ioe) {
+      LOG.error("Unable to process metadata snapshot request. ", ioe);
+      throw ioe;
+    } finally {
+      if (checkpoint != null) {
+        try {
+          checkpoint.cleanupCheckpoint();
+        } catch (IOException e) {
+          LOG.error("Error trying to clean checkpoint at {} .",
+              checkpoint.getCheckpointLocation().toString());
+        }
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java
new file mode 100644
index 0000000..1194a52
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Stream to which the tar db checkpoint will be transferred over to the
+ * destination over grpc.
+ * TODO: Make it a generic utility to be used both during container replication
+ * as well as SCM checkpoint transfer
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.protocol.scm.proto.InterSCMProtocolProtos;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * Adapter from {@code OutputStream} to gRPC {@code StreamObserver}.
+ * Data is buffered in a limited buffer of the specified size.
+ */
+class SCMGrpcOutputStream extends OutputStream {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMGrpcOutputStream.class);
+
+  private final StreamObserver<InterSCMProtocolProtos.
+      CopyDBCheckpointResponseProto> responseObserver;
+
+  private final ByteString.Output buffer;
+
+  private final String clusterId;
+
+  private final int bufferSize;
+
+  private long writtenBytes;
+
+  SCMGrpcOutputStream(
+      StreamObserver<InterSCMProtocolProtos.
+          CopyDBCheckpointResponseProto> responseObserver,
+      String clusterId, int bufferSize) {
+    this.responseObserver = responseObserver;
+    this.clusterId = clusterId;
+    this.bufferSize = bufferSize;
+    buffer = ByteString.newOutput(bufferSize);
+  }
+
+  @Override public void write(int b) {
+    try {
+      buffer.write(b);
+      if (buffer.size() >= bufferSize) {
+        flushBuffer(false);
+      }
+    } catch (Exception ex) {
+      responseObserver.onError(ex);
+    }
+  }
+
+  @Override public void write(@Nonnull byte[] data, int offset, int length) {
+    if ((offset < 0) || (offset > data.length) || (length < 0) || (
+        (offset + length) > data.length) || ((offset + length) < 0)) {
+      throw new IndexOutOfBoundsException();
+    } else if (length == 0) {
+      return;
+    }
+
+    try {
+      if (buffer.size() >= bufferSize) {
+        flushBuffer(false);
+      }
+
+      int remaining = length;
+      int off = offset;
+      int len = Math.min(remaining, bufferSize - buffer.size());
+      while (remaining > 0) {
+        buffer.write(data, off, len);
+        if (buffer.size() >= bufferSize) {
+          flushBuffer(false);
+        }
+        off += len;
+        remaining -= len;
+        len = Math.min(bufferSize, remaining);
+      }
+    } catch (Exception ex) {
+      responseObserver.onError(ex);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    flushBuffer(true);
+    LOG.info("Sent {} bytes for cluster {}", writtenBytes, clusterId);
+    responseObserver.onCompleted();
+    buffer.close();
+  }
+
+  private void flushBuffer(boolean eof) {
+    int length = buffer.size();
+    if (length > 0) {
+      ByteString data = buffer.toByteString();
+      LOG.debug("Sending {} bytes (of type {})", length,
+          data.getClass().getSimpleName());
+      InterSCMProtocolProtos.CopyDBCheckpointResponseProto response =
+          InterSCMProtocolProtos.CopyDBCheckpointResponseProto.newBuilder()
+              .setClusterId(clusterId).setData(data).setEof(eof)
+              .setReadOffset(writtenBytes).setLen(length).build();
+      responseObserver.onNext(response);
+      writtenBytes += length;
+      buffer.reset();
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBuffer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBuffer.java
new file mode 100644
index 0000000..f3f9f97
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBuffer.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.ratis.statemachine.SnapshotInfo;
+
+import java.io.IOException;
+
+/**
+ * DB transaction that buffers SCM DB transactions. Call the flush method
+ * to flush a batch into SCM DB. This buffer also maintains a latest transaction
+ * info to indicate the information of the latest transaction in the buffer.
+ */
+public interface SCMHADBTransactionBuffer
+    extends DBTransactionBuffer {
+
+  void updateLatestTrxInfo(TransactionInfo info);
+
+  TransactionInfo getLatestTrxInfo();
+
+  SnapshotInfo getLatestSnapshot();
+
+  void setLatestSnapshot(SnapshotInfo latestSnapshot);
+
+  void flush() throws IOException;
+
+  void init() throws IOException;
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java
new file mode 100644
index 0000000..cdda49c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHADBTransactionBufferImpl.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImplV2;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.ratis.statemachine.SnapshotInfo;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
+
+/**
+ * This is a transaction buffer that buffers SCM DB operations for Pipeline and
+ * Container. When flush this buffer to DB, a transaction info will also be
+ * written into DB to indicate the term and transaction index for the latest
+ * operation in DB.
+ */
+public class SCMHADBTransactionBufferImpl implements SCMHADBTransactionBuffer {
+  private final StorageContainerManager scm;
+  private SCMMetadataStore metadataStore;
+  private BatchOperation currentBatchOperation;
+  private TransactionInfo latestTrxInfo;
+  private SnapshotInfo latestSnapshot;
+
+  public SCMHADBTransactionBufferImpl(StorageContainerManager scm)
+      throws IOException {
+    this.scm = scm;
+    init();
+  }
+
+  private BatchOperation getCurrentBatchOperation() {
+    return currentBatchOperation;
+  }
+
+  @Override
+  public <KEY, VALUE> void addToBuffer(
+      Table<KEY, VALUE> table, KEY key, VALUE value) throws IOException {
+    table.putWithBatch(getCurrentBatchOperation(), key, value);
+  }
+
+  @Override
+  public <KEY, VALUE> void removeFromBuffer(Table<KEY, VALUE> table, KEY key)
+      throws IOException {
+    table.deleteWithBatch(getCurrentBatchOperation(), key);
+  }
+
+  @Override
+  public void updateLatestTrxInfo(TransactionInfo info) {
+    if (info.compareTo(this.latestTrxInfo) <= 0) {
+      throw new IllegalArgumentException(
+          "Updating DB buffer transaction info by an older transaction info, "
+          + "current: " + this.latestTrxInfo + ", updating to: " + info);
+    }
+    this.latestTrxInfo = info;
+  }
+
+  @Override
+  public TransactionInfo getLatestTrxInfo() {
+    return this.latestTrxInfo;
+  }
+
+  @Override
+  public SnapshotInfo getLatestSnapshot() {
+    return latestSnapshot;
+  }
+
+  @Override
+  public void setLatestSnapshot(SnapshotInfo latestSnapshot) {
+    this.latestSnapshot = latestSnapshot;
+  }
+
+  @Override
+  public void flush() throws IOException {
+    // write latest trx info into trx table in the same batch
+    Table<String, TransactionInfo> transactionInfoTable
+        = metadataStore.getTransactionInfoTable();
+    transactionInfoTable.putWithBatch(currentBatchOperation,
+        TRANSACTION_INFO_KEY, latestTrxInfo);
+
+    metadataStore.getStore().commitBatchOperation(currentBatchOperation);
+    currentBatchOperation.close();
+    this.latestSnapshot = latestTrxInfo.toSnapshotInfo();
+    // reset batch operation
+    currentBatchOperation = metadataStore.getStore().initBatchOperation();
+
+    DeletedBlockLog deletedBlockLog = scm.getScmBlockManager()
+        .getDeletedBlockLog();
+    Preconditions.checkArgument(
+        deletedBlockLog instanceof DeletedBlockLogImplV2);
+    ((DeletedBlockLogImplV2) deletedBlockLog).onFlush();
+  }
+
+  @Override
+  public void init() throws IOException {
+    metadataStore = scm.getScmMetadataStore();
+
+    // initialize a batch operation during construction time
+    currentBatchOperation = this.metadataStore.getStore().initBatchOperation();
+    latestTrxInfo = this.metadataStore.getTransactionInfoTable()
+        .get(TRANSACTION_INFO_KEY);
+    if (latestTrxInfo == null) {
+      // transaction table is empty
+      latestTrxInfo =
+          TransactionInfo
+              .builder()
+              .setTransactionIndex(-1)
+              .setCurrentTerm(0)
+              .build();
+    }
+    latestSnapshot = latestTrxInfo.toSnapshotInfo();
+  }
+
+  @Override
+  public String toString() {
+    return latestTrxInfo.toString();
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
new file mode 100644
index 0000000..62951d5
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * InvocationHandler which checks for {@link Replicate} annotation and
+ * dispatches the request to Ratis Server.
+ */
+public class SCMHAInvocationHandler implements InvocationHandler {
+
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(SCMHAInvocationHandler.class);
+
+  private final RequestType requestType;
+  private final Object localHandler;
+  private final SCMRatisServer ratisHandler;
+
+  /**
+   * TODO.
+   */
+  public SCMHAInvocationHandler(final RequestType requestType,
+                                final Object localHandler,
+                                final SCMRatisServer ratisHandler) {
+    this.requestType = requestType;
+    this.localHandler = localHandler;
+    this.ratisHandler = ratisHandler;
+    if (ratisHandler != null) {
+      ratisHandler.registerStateMachineHandler(requestType, localHandler);
+    }
+  }
+
+  @Override
+  public Object invoke(final Object proxy, final Method method,
+                       final Object[] args) throws Throwable {
+    try {
+      long startTime = Time.monotonicNow();
+      final Object result =
+          ratisHandler != null && method.isAnnotationPresent(Replicate.class) ?
+              invokeRatis(method, args) :
+              invokeLocal(method, args);
+      LOG.debug("Call: {} took {} ms", method, Time.monotonicNow() - startTime);
+      return result;
+    } catch(InvocationTargetException iEx) {
+      throw iEx.getCause();
+    }
+  }
+
+  /**
+   * TODO.
+   */
+  private Object invokeLocal(Method method, Object[] args)
+      throws InvocationTargetException, IllegalAccessException {
+    LOG.trace("Invoking method {} on target {} with arguments {}",
+        method, localHandler, args);
+    return method.invoke(localHandler, args);
+  }
+
+  /**
+   * TODO.
+   */
+  private Object invokeRatis(Method method, Object[] args)
+      throws Exception {
+    long startTime = Time.monotonicNowNanos();
+    Preconditions.checkNotNull(ratisHandler);
+    final SCMRatisResponse response =  ratisHandler.submitRequest(
+        SCMRatisRequest.of(requestType, method.getName(),
+            method.getParameterTypes(), args));
+    LOG.info("Invoking method {} on target {}, cost {}us",
+        method, ratisHandler, (Time.monotonicNowNanos() - startTime) / 1000.0);
+    if (response.isSuccess()) {
+      return response.getResult();
+    }
+    // Should we unwrap and throw proper exception from here?
+    throw response.getException();
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
new file mode 100644
index 0000000..c6dfd9c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.ratis.server.protocol.TermIndex;
+
+import java.io.IOException;
+
+/**
+ * SCMHAManager provides HA service for SCM.
+ */
+public interface SCMHAManager {
+
+  /**
+   * Starts HA service.
+   */
+  void start() throws IOException;
+
+  /**
+   * Returns RatisServer instance associated with the SCM instance.
+   */
+  SCMRatisServer getRatisServer();
+
+  /**
+   * Returns SCM snapshot provider.
+   */
+  SCMSnapshotProvider getSCMSnapshotProvider();
+
+  /**
+   * Returns DB transaction buffer.
+   */
+  DBTransactionBuffer getDBTransactionBuffer();
+
+  /**
+   * Returns the DBTransactionBuffer as SCMHADBTransactionBuffer if its
+   * valid.
+   * @return
+   */
+  SCMHADBTransactionBuffer asSCMHADBTransactionBuffer();
+
+  /**
+   * Stops the HA service.
+   */
+  void shutdown() throws IOException;
+
+  /**
+   * Adds the SC M instance to the SCM HA group.
+   * @param request AddSCM request
+   * @return status signying whether the AddSCM request succeeded or not.
+   * @throws IOException
+   */
+  boolean addSCM(AddSCMRequest request) throws IOException;
+
+  /**
+   * Download the SCM DB checkpoint from leader and reload the SCM state from
+   * it.
+   * @param leaderId leader id.
+   * @return
+   */
+  TermIndex installSnapshotFromLeader(String leaderId);
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
new file mode 100644
index 0000000..4835d03
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.HAUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import org.apache.hadoop.ozone.util.ExitManager;
+import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.ratis.util.FileUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+
+/**
+ * SCMHAManagerImpl uses Apache Ratis for HA implementation. We will have 2N+1
+ * node Ratis ring. The Ratis ring will have one Leader node and 2N follower
+ * nodes.
+ *
+ * TODO
+ *
+ */
+public class SCMHAManagerImpl implements SCMHAManager {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMHAManagerImpl.class);
+
+  private final SCMRatisServer ratisServer;
+  private final ConfigurationSource conf;
+  private final DBTransactionBuffer transactionBuffer;
+  private final SCMSnapshotProvider scmSnapshotProvider;
+  private final StorageContainerManager scm;
+  private ExitManager exitManager;
+
+  // this should ideally be started only in a ratis leader
+  private final InterSCMGrpcProtocolService grpcServer;
+
+  /**
+   * Creates SCMHAManager instance.
+   */
+  public SCMHAManagerImpl(final ConfigurationSource conf,
+      final StorageContainerManager scm) throws IOException {
+    this.conf = conf;
+    this.scm = scm;
+    if (SCMHAUtils.isSCMHAEnabled(conf)) {
+      this.transactionBuffer = new SCMHADBTransactionBufferImpl(scm);
+      this.ratisServer = new SCMRatisServerImpl(conf, scm,
+          (SCMHADBTransactionBuffer) transactionBuffer);
+      this.scmSnapshotProvider = new SCMSnapshotProvider(conf,
+          scm.getSCMHANodeDetails().getPeerNodeDetails());
+      grpcServer = new InterSCMGrpcProtocolService(conf, scm);
+    } else {
+      this.transactionBuffer = new SCMDBTransactionBufferImpl();
+      this.scmSnapshotProvider = null;
+      this.grpcServer = null;
+      this.ratisServer = null;
+    }
+
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void start() throws IOException {
+    if (ratisServer == null) {
+      return;
+    }
+    ratisServer.start();
+    if (ratisServer.getDivision().getGroup().getPeers().isEmpty()) {
+      // this is a bootstrapped node
+      // It will first try to add itself to existing ring
+      boolean success = HAUtils.addSCM(OzoneConfiguration.of(conf),
+          new AddSCMRequest.Builder().setClusterId(scm.getClusterId())
+              .setScmId(scm.getScmId())
+              .setRatisAddr(scm.getSCMHANodeDetails().getLocalNodeDetails()
+                  // TODO : Should we use IP instead of hostname??
+                  .getRatisHostPortStr()).build(), scm.getSCMNodeId());
+      if (!success) {
+        throw new IOException("Adding SCM to existing HA group failed");
+      }
+    } else {
+      LOG.info(" scm role is {} peers {}",
+          ratisServer.getDivision().getInfo().getCurrentRole(),
+          ratisServer.getDivision().getGroup().getPeers());
+    }
+    grpcServer.start();
+  }
+
+  public SCMRatisServer getRatisServer() {
+    return ratisServer;
+  }
+
+  @Override
+  public DBTransactionBuffer getDBTransactionBuffer() {
+    return transactionBuffer;
+  }
+
+  @Override
+  public SCMSnapshotProvider getSCMSnapshotProvider() {
+    return scmSnapshotProvider;
+  }
+
+  @Override
+  public SCMHADBTransactionBuffer asSCMHADBTransactionBuffer() {
+    Preconditions
+        .checkArgument(transactionBuffer instanceof SCMHADBTransactionBuffer);
+    return (SCMHADBTransactionBuffer)transactionBuffer;
+
+  }
+  /**
+   * Download and install latest checkpoint from leader SCM.
+   *
+   * @param leaderId peerNodeID of the leader SCM
+   * @return If checkpoint is installed successfully, return the
+   *         corresponding termIndex. Otherwise, return null.
+   */
+  public TermIndex installSnapshotFromLeader(String leaderId) {
+    if (scmSnapshotProvider == null) {
+      LOG.error("SCM Snapshot Provider is not configured as there are no peer "
+          + "nodes.");
+      return null;
+    }
+
+    DBCheckpoint dBCheckpoint = getDBCheckpointFromLeader(leaderId);
+    LOG.info("Downloaded checkpoint from Leader {} to the location {}",
+        leaderId, dBCheckpoint.getCheckpointLocation());
+
+    TermIndex termIndex = null;
+    try {
+      termIndex = installCheckpoint(leaderId, dBCheckpoint);
+    } catch (Exception ex) {
+      LOG.error("Failed to install snapshot from Leader SCM.", ex);
+    }
+    return termIndex;
+  }
+
+  /**
+   * Download the latest SCM DB checkpoint from the leader SCM.
+   *
+   * @param leaderId SCMNodeID of the leader SCM node.
+   * @return latest DB checkpoint from leader SCM.
+   */
+  private DBCheckpoint getDBCheckpointFromLeader(String leaderId) {
+    LOG.info("Downloading checkpoint from leader SCM {} and reloading state " +
+        "from the checkpoint.", leaderId);
+
+    try {
+      return scmSnapshotProvider.getSCMDBSnapshot(leaderId);
+    } catch (IOException e) {
+      LOG.error("Failed to download checkpoint from SCM leader {}", leaderId,
+          e);
+    }
+    return null;
+  }
+
+  /**
+   * Install checkpoint. If the checkpoints snapshot index is greater than
+   * SCM's last applied transaction index, then re-initialize the SCM
+   * state via this checkpoint. Before re-initializing SCM state, the SCM Ratis
+   * server should be stopped so that no new transactions can be applied.
+   */
+  @VisibleForTesting
+  public TermIndex installCheckpoint(String leaderId, DBCheckpoint dbCheckpoint)
+      throws Exception {
+
+    Path checkpointLocation = dbCheckpoint.getCheckpointLocation();
+    TransactionInfo checkpointTrxnInfo = HAUtils
+        .getTrxnInfoFromCheckpoint(OzoneConfiguration.of(conf),
+            checkpointLocation, new SCMDBDefinition());
+
+    LOG.info("Installing checkpoint with SCMTransactionInfo {}",
+        checkpointTrxnInfo);
+
+    return installCheckpoint(leaderId, checkpointLocation, checkpointTrxnInfo);
+  }
+
+  public TermIndex installCheckpoint(String leaderId, Path checkpointLocation,
+      TransactionInfo checkpointTrxnInfo) throws Exception {
+
+    File dbBackup = null;
+    TermIndex termIndex =
+        getRatisServer().getSCMStateMachine().getLastAppliedTermIndex();
+    long term = termIndex.getTerm();
+    long lastAppliedIndex = termIndex.getIndex();
+    // Check if current applied log index is smaller than the downloaded
+    // checkpoint transaction index. If yes, proceed by stopping the ratis
+    // server so that the SCM state can be re-initialized. If no then do not
+    // proceed with installSnapshot.
+    boolean canProceed = HAUtils
+        .verifyTransactionInfo(checkpointTrxnInfo, lastAppliedIndex, leaderId,
+            checkpointLocation, LOG);
+    File oldDBLocation = scm.getScmMetadataStore().getStore().getDbLocation();
+    if (canProceed) {
+      try {
+        // Stop services
+        stopServices();
+
+        // Pause the State Machine so that no new transactions can be applied.
+        // This action also clears the SCM Double Buffer so that if there
+        // are any pending transactions in the buffer, they are discarded.
+        getRatisServer().getSCMStateMachine().pause();
+      } catch (Exception e) {
+        LOG.error("Failed to stop/ pause the services. Cannot proceed with "
+            + "installing the new checkpoint.");
+        startServices();
+        throw e;
+      }
+      try {
+        dbBackup = HAUtils
+            .replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation,
+                checkpointLocation, OzoneConsts.SCM_DB_BACKUP_PREFIX);
+        term = checkpointTrxnInfo.getTerm();
+        lastAppliedIndex = checkpointTrxnInfo.getTransactionIndex();
+        LOG.info(
+            "Replaced DB with checkpoint from SCM: {}, term: {}, index: {}",
+            leaderId, term, lastAppliedIndex);
+      } catch (Exception e) {
+        LOG.error("Failed to install Snapshot from {} as SCM failed to replace"
+            + " DB with downloaded checkpoint. Reloading old SCM state.", e);
+      }
+      // Reload the DB store with the new checkpoint.
+      // Restart (unpause) the state machine and update its last applied index
+      // to the installed checkpoint's snapshot index.
+      try {
+        reloadSCMState();
+        getRatisServer().getSCMStateMachine().unpause(term, lastAppliedIndex);
+        LOG.info("Reloaded SCM state with Term: {} and Index: {}", term,
+            lastAppliedIndex);
+      } catch (Exception ex) {
+        String errorMsg =
+            "Failed to reload SCM state and instantiate services.";
+        exitManager.exitSystem(1, errorMsg, ex, LOG);
+      }
+
+      // Delete the backup DB
+      try {
+        if (dbBackup != null) {
+          FileUtils.deleteFully(dbBackup);
+        }
+      } catch (Exception e) {
+        LOG.error("Failed to delete the backup of the original DB {}",
+            dbBackup);
+      }
+    } else {
+      LOG.warn("Cannot proceed with InstallSnapshot as SCM is at TermIndex {} "
+          + "and checkpoint has lower TermIndex {}. Reloading old "
+          + "state of SCM.", termIndex, checkpointTrxnInfo.getTermIndex());
+    }
+
+    if (lastAppliedIndex != checkpointTrxnInfo.getTransactionIndex()) {
+      // Install Snapshot failed and old state was reloaded. Return null to
+      // Ratis to indicate that installation failed.
+      return null;
+    }
+
+    TermIndex newTermIndex = TermIndex.valueOf(term, lastAppliedIndex);
+    return newTermIndex;
+  }
+
+
+  /**
+   * Re-instantiate MetadataManager with new DB checkpoint.
+   * All the classes which use/ store MetadataManager should also be updated
+   * with the new MetadataManager instance.
+   */
+  void reloadSCMState()
+      throws IOException {
+    startServices();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void shutdown() throws IOException {
+    if (ratisServer != null) {
+      ratisServer.stop();
+      ratisServer.getSCMStateMachine().close();
+      grpcServer.stop();
+    }
+  }
+
+  @Override
+  public boolean addSCM(AddSCMRequest request) throws IOException {
+    String clusterId = scm.getClusterId();
+    if (!request.getClusterId().equals(scm.getClusterId())) {
+      throw new IOException(
+          "SCM " + request.getScmId() + " with addr " + request.getRatisAddr()
+              + " has cluster Id " + request.getClusterId()
+              + " but leader SCM cluster id is " + clusterId);
+    }
+    Preconditions.checkNotNull(
+        getRatisServer().getDivision().getGroup().getGroupId());
+    return getRatisServer().addSCM(request);
+  }
+
+  void stopServices() throws Exception {
+
+    // just stop the SCMMetaData store. All other background
+    // services will be in pausing state in the follower.
+    scm.getScmMetadataStore().stop();
+  }
+
+  @VisibleForTesting
+   public void startServices() throws IOException {
+
+   // TODO: Fix the metrics ??
+    final SCMMetadataStore metadataStore = scm.getScmMetadataStore();
+    metadataStore.start(OzoneConfiguration.of(conf));
+    scm.getPipelineManager().reinitialize(metadataStore.getPipelineTable());
+    scm.getContainerManager().reinitialize(metadataStore.getContainerTable());
+    scm.getScmBlockManager().getDeletedBlockLog().reinitialize(
+        metadataStore.getDeletedBlocksTXTable());
+    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
+      scm.getCertificateServer().reinitialize(metadataStore);
+    }
+  }
+
+  @VisibleForTesting
+  public void setExitManagerForTesting(ExitManager exitManagerForTesting) {
+    this.exitManager = exitManagerForTesting;
+  }
+
+  @VisibleForTesting
+  public static Logger getLogger() {
+    return LOG;
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
new file mode 100644
index 0000000..b4d83e0
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
+import org.apache.hadoop.ozone.ha.ConfUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_DIRS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY;
+
+public class SCMHANodeDetails {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SCMHANodeDetails.class);
+
+  private final SCMNodeDetails localNodeDetails;
+  private final List<SCMNodeDetails> peerNodeDetails;
+
+  private static String[] nodeSpecificConfigKeys = new String[] {
+      OZONE_SCM_DATANODE_ADDRESS_KEY,
+      OZONE_SCM_DATANODE_PORT_KEY,
+      OZONE_SCM_DATANODE_BIND_HOST_KEY,
+      OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+      OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+      OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY,
+      OZONE_SCM_CLIENT_ADDRESS_KEY,
+      OZONE_SCM_CLIENT_PORT_KEY,
+      OZONE_SCM_CLIENT_BIND_HOST_KEY,
+      OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
+      OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+      OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY,
+      OZONE_SCM_RATIS_PORT_KEY,
+      OZONE_SCM_GRPC_PORT_KEY,
+      OZONE_SCM_HTTP_BIND_HOST_KEY,
+      OZONE_SCM_HTTPS_BIND_HOST_KEY,
+      OZONE_SCM_HTTP_ADDRESS_KEY,
+      OZONE_SCM_HTTPS_ADDRESS_KEY,
+      OZONE_SCM_DB_DIRS,
+      OZONE_SCM_ADDRESS_KEY
+  };
+
+  public SCMHANodeDetails(SCMNodeDetails localNodeDetails,
+      List<SCMNodeDetails> peerNodeDetails) {
+    this.localNodeDetails = localNodeDetails;
+    this.peerNodeDetails = peerNodeDetails;
+  }
+
+  public SCMNodeDetails getLocalNodeDetails() {
+    return localNodeDetails;
+  }
+
+  public List< SCMNodeDetails > getPeerNodeDetails() {
+    return peerNodeDetails;
+  }
+
+  public static SCMHANodeDetails loadDefaultConfig(
+      OzoneConfiguration conf) throws IOException {
+    int ratisPort = conf.getInt(
+        ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY,
+        ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT);
+    int grpcPort = conf.getInt(
+        ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY,
+        ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT);
+    InetSocketAddress rpcAddress = new InetSocketAddress(
+        InetAddress.getLocalHost(), 0);
+    SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder()
+        .setRatisPort(ratisPort)
+        .setGrpcPort(grpcPort)
+        .setRpcAddress(rpcAddress)
+        .setDatanodeProtocolServerAddress(
+            HddsServerUtil.getScmDataNodeBindAddress(conf))
+        .setDatanodeAddressKey(OZONE_SCM_DATANODE_ADDRESS_KEY)
+        .setBlockProtocolServerAddress(
+            HddsServerUtil.getScmBlockClientBindAddress(conf))
+        .setBlockProtocolServerAddressKey(
+            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY)
+        .setClientProtocolServerAddress(
+            HddsServerUtil.getScmClientBindAddress(conf))
+        .setClientProtocolServerAddressKey(
+            ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY)
+        .build();
+    return new SCMHANodeDetails(scmNodeDetails, Collections.emptyList());
+  }
+
+  public static SCMHANodeDetails loadSCMHAConfig(OzoneConfiguration conf)
+      throws IOException {
+    InetSocketAddress localRpcAddress = null;
+    String localScmServiceId = null;
+    String localScmNodeId = null;
+    int localRatisPort = 0;
+    int localGrpcPort = 0;
+
+    Collection<String> scmServiceIds;
+
+    localScmServiceId = conf.getTrimmed(
+        ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID);
+
+    LOG.info("ServiceID for StorageContainerManager is {}", localScmServiceId);
+
+    if (localScmServiceId == null) {
+      // There is no internal scm service id is being set, fall back to ozone
+      // .scm.service.ids.
+      LOG.info("{} is not defined, falling back to {} to find serviceID for "
+              + "StorageContainerManager if it is HA enabled cluster",
+          OZONE_SCM_DEFAULT_SERVICE_ID, OZONE_SCM_SERVICE_IDS_KEY);
+      scmServiceIds = conf.getTrimmedStringCollection(
+          OZONE_SCM_SERVICE_IDS_KEY);
+    } else {
+      LOG.info("ServiceID for StorageContainerManager is {}",
+          localScmServiceId);
+      scmServiceIds = Collections.singleton(localScmServiceId);
+    }
+
+    localScmNodeId = conf.get(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY);
+    int found = 0;
+    boolean isSCMddressSet = false;
+
+    for (String serviceId : scmServiceIds) {
+      Collection<String> scmNodeIds = SCMHAUtils.getSCMNodeIds(conf, serviceId);
+
+      // TODO: need to fall back to ozone.scm.names in case scm node ids are
+      // not defined.
+      if (scmNodeIds.size() == 0) {
+        throw new IllegalArgumentException(
+            String.format("Configuration does not have any value set for %s " +
+                "for the service %s. List of SCM Node ID's should be " +
+                "specified for an SCM service",
+                ScmConfigKeys.OZONE_SCM_NODES_KEY, serviceId));
+      }
+      // TODO: load Ratis peers configuration
+      boolean isPeer;
+      List<SCMNodeDetails> peerNodesList = new ArrayList<>();
+      for (String nodeId : scmNodeIds) {
+        if (localScmNodeId != null && !localScmNodeId.equals(nodeId)) {
+          isPeer = true;
+        } else {
+          isPeer = false;
+        }
+
+        String rpcAddrKey = ConfUtils.addKeySuffixes(
+            OZONE_SCM_ADDRESS_KEY, serviceId, nodeId);
+        String rpcAddrStr = conf.get(rpcAddrKey);
+        if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
+          throwConfException("Configuration does not have any value set for " +
+              "%s. SCM RPC Address should be set for all nodes in a SCM " +
+              "service.", rpcAddrKey);
+        }
+        isSCMddressSet = true;
+
+        String ratisPortKey = ConfUtils.addKeySuffixes(OZONE_SCM_RATIS_PORT_KEY,
+            serviceId, nodeId);
+        int ratisPort = conf.getInt(ratisPortKey, OZONE_SCM_RATIS_PORT_DEFAULT);
+
+        String grpcPortKey = ConfUtils
+            .addKeySuffixes(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, serviceId,
+                nodeId);
+        int grpcPort = conf.getInt(grpcPortKey, OZONE_SCM_GRPC_PORT_DEFAULT);
+
+        InetSocketAddress addr = null;
+        try {
+          addr = NetUtils.createSocketAddr(rpcAddrStr, ratisPort);
+        } catch (Exception e) {
+          LOG.error("Couldn't create socket address for SCM {} : {}", nodeId,
+              rpcAddrStr, e);
+          throw e;
+        }
+
+        if (addr.isUnresolved()) {
+          LOG.error("Address for SCM {} : {} couldn't be resolved. Proceeding "
+                  + "with unresolved host to create Ratis ring.", nodeId,
+              rpcAddrStr);
+        }
+
+        if (!addr.isUnresolved() && !isPeer && ConfUtils.isAddressLocal(addr)) {
+          localRpcAddress = addr;
+          localScmServiceId = serviceId;
+          localScmNodeId = nodeId;
+          localRatisPort = ratisPort;
+          localGrpcPort = grpcPort;
+          found++;
+        } else {
+          peerNodesList.add(getHASCMNodeDetails(conf, serviceId,
+              nodeId, addr, ratisPort, grpcPort));
+        }
+      }
+
+      if (found == 1) {
+        LOG.info("Found matching SCM address with SCMServiceId: {}, " +
+                "SCMNodeId: {}, RPC Address: {} and Ratis port: {}",
+            localScmServiceId, localScmNodeId,
+            NetUtils.getHostPortString(localRpcAddress), localRatisPort);
+
+        // Set SCM node specific config keys.
+        ConfUtils.setNodeSpecificConfigs(nodeSpecificConfigKeys, conf,
+            localScmServiceId, localScmNodeId, LOG);
+
+        return new SCMHANodeDetails(
+            getHASCMNodeDetails(conf, localScmServiceId, localScmNodeId,
+                localRpcAddress, localRatisPort, localGrpcPort), peerNodesList);
+
+      } else if (found > 1) {
+        throwConfException("Configuration has multiple %s addresses that " +
+                "match local node's address. Please configure the system " +
+                "with %s and %s", OZONE_SCM_ADDRESS_KEY,
+            OZONE_SCM_SERVICE_IDS_KEY, OZONE_SCM_ADDRESS_KEY);
+      }
+    }
+
+    if (!isSCMddressSet) {
+      // If HA config is not set, fall back to default configuration
+      return loadDefaultConfig(conf);
+    } else {
+      return null;
+    }
+  }
+
+  public static SCMNodeDetails getHASCMNodeDetails(OzoneConfiguration conf,
+      String localScmServiceId, String localScmNodeId,
+      InetSocketAddress rpcAddress, int ratisPort, int grpcPort) {
+    Preconditions.checkNotNull(localScmServiceId);
+    Preconditions.checkNotNull(localScmNodeId);
+
+    SCMNodeDetails.Builder builder = new SCMNodeDetails.Builder();
+    builder
+        .setRpcAddress(rpcAddress)
+        .setRatisPort(ratisPort)
+        .setGrpcPort(grpcPort)
+        .setSCMServiceId(localScmServiceId)
+        .setSCMNodeId(localScmNodeId)
+        .setBlockProtocolServerAddress(
+            ScmUtils.getScmBlockProtocolServerAddress(
+            conf, localScmServiceId, localScmNodeId))
+        .setBlockProtocolServerAddressKey(
+            ScmUtils.getScmBlockProtocolServerAddressKey(
+                localScmServiceId, localScmNodeId))
+        .setClientProtocolServerAddress(
+            ScmUtils.getClientProtocolServerAddress(conf,
+            localScmServiceId, localScmNodeId))
+        .setClientProtocolServerAddressKey(
+            ScmUtils.getClientProtocolServerAddressKey(localScmServiceId,
+                localScmNodeId))
+        .setDatanodeProtocolServerAddress(
+            ScmUtils.getScmDataNodeBindAddress(conf, localScmServiceId,
+                localScmNodeId))
+        .setDatanodeAddressKey(
+            ScmUtils.getScmDataNodeBindAddressKey(localScmServiceId,
+                localScmNodeId));
+
+    return builder.build();
+  }
+
+  private static void throwConfException(String message, String... arguments)
+      throws IllegalArgumentException {
+    String exceptionMsg = String.format(message, arguments);
+    LOG.error(exceptionMsg);
+    throw new OzoneIllegalArgumentException(exceptionMsg);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java
new file mode 100644
index 0000000..3f56e6a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.ha.NodeDetails;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+
+/**
+ * Construct SCM node details.
+ */
+public final class SCMNodeDetails extends NodeDetails {
+  private InetSocketAddress blockProtocolServerAddress;
+  private String blockProtocolServerAddressKey;
+  private InetSocketAddress clientProtocolServerAddress;
+  private String clientProtocolServerAddressKey;
+  private InetSocketAddress datanodeProtocolServerAddress;
+  private String datanodeAddressKey;
+  private int grpcPort;
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SCMNodeDetails.class);
+
+  /**
+   * Constructs SCMNodeDetails object.
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private SCMNodeDetails(String serviceId, String nodeId,
+      InetSocketAddress rpcAddr, int ratisPort, int grpcPort,
+      String httpAddress, String httpsAddress,
+      InetSocketAddress blockProtocolServerAddress,
+      InetSocketAddress clientProtocolServerAddress,
+      InetSocketAddress datanodeProtocolServerAddress, RaftGroup group,
+      RaftPeerId selfPeerId, String datanodeAddressKey,
+      String blockProtocolServerAddressKey,
+      String clientProtocolServerAddressAddressKey) {
+    super(serviceId, nodeId, rpcAddr, ratisPort,
+        httpAddress, httpsAddress);
+    this.grpcPort = grpcPort;
+    this.blockProtocolServerAddress = blockProtocolServerAddress;
+    this.clientProtocolServerAddress = clientProtocolServerAddress;
+    this.datanodeProtocolServerAddress = datanodeProtocolServerAddress;
+    this.datanodeAddressKey = datanodeAddressKey;
+    this.blockProtocolServerAddressKey = blockProtocolServerAddressKey;
+    this.clientProtocolServerAddressKey = clientProtocolServerAddressAddressKey;
+  }
+
+  @Override
+  public String toString() {
+    return "SCMNodeDetails["
+        + "scmServiceId=" + getServiceId() +
+        ", scmNodeId=" + getNodeId() +
+        ", rpcAddress=" + getRpcAddressString() +
+        ", ratisPort=" + getRatisPort() +
+        ", httpAddress=" + getHttpAddress() +
+        ", httpsAddress=" + getHttpsAddress() +
+        ", blockProtocolServerAddress=" + getBlockProtocolServerAddress() +
+        ", clientProtocolServerAddress=" + getClientProtocolServerAddress() +
+        ", datanodeProtocolServerAddress=" + getDatanodeProtocolServerAddress()
+        + "]";
+  }
+
+  /**
+   * Builder class for SCMNodeDetails.
+   */
+  public static class Builder {
+    private String scmServiceId;
+    private String scmNodeId;
+    private InetSocketAddress rpcAddress;
+    private int ratisPort;
+    private int grpcPort;
+    private String httpAddr;
+    private String httpsAddr;
+    private InetSocketAddress blockProtocolServerAddress;
+    private String blockProtocolServerAddressKey;
+    private InetSocketAddress clientProtocolServerAddress;
+    private String clientProtocolServerAddressKey;
+    private InetSocketAddress datanodeProtocolServerAddress;
+    private String datanodeAddressKey;
+    private RaftGroup raftGroup;
+    private RaftPeerId selfPeerId;
+
+    public Builder setDatanodeAddressKey(String addressKey) {
+      this.datanodeAddressKey = addressKey;
+      return this;
+    }
+
+    public Builder setBlockProtocolServerAddressKey(String addressKey) {
+      this.blockProtocolServerAddressKey = addressKey;
+      return this;
+    }
+
+    public Builder setBlockProtocolServerAddress(InetSocketAddress address) {
+      this.blockProtocolServerAddress = address;
+      return this;
+    }
+
+    public Builder setClientProtocolServerAddress(InetSocketAddress address) {
+      this.clientProtocolServerAddress = address;
+      return this;
+    }
+
+    public Builder setClientProtocolServerAddressKey(String addressKey) {
+      this.clientProtocolServerAddressKey = addressKey;
+      return this;
+    }
+
+    public Builder setDatanodeProtocolServerAddress(InetSocketAddress address) {
+      this.datanodeProtocolServerAddress = address;
+      return this;
+    }
+
+    public Builder setRaftGroup(RaftGroup group) {
+      this.raftGroup = group;
+      return this;
+    }
+
+    public Builder setSelfPeerId(RaftPeerId peerId) {
+      this.selfPeerId = peerId;
+      return this;
+    }
+
+    public Builder setRpcAddress(InetSocketAddress rpcAddr) {
+      this.rpcAddress = rpcAddr;
+      return this;
+    }
+
+    public Builder setRatisPort(int port) {
+      this.ratisPort = port;
+      return this;
+    }
+
+    public Builder setGrpcPort(int port) {
+      this.grpcPort = port;
+      return this;
+    }
+
+    public Builder setSCMServiceId(String serviceId) {
+      this.scmServiceId = serviceId;
+      return this;
+    }
+
+    public Builder setSCMNodeId(String nodeId) {
+      this.scmNodeId = nodeId;
+      return this;
+    }
+
+    public Builder setHttpAddress(String httpAddress) {
+      this.httpAddr = httpAddress;
+      return this;
+    }
+
+    public Builder setHttpsAddress(String httpsAddress) {
+      this.httpsAddr = httpsAddress;
+      return this;
+    }
+
+    public SCMNodeDetails build() {
+      return new SCMNodeDetails(scmServiceId, scmNodeId, rpcAddress,
+          ratisPort, grpcPort, httpAddr, httpsAddr, blockProtocolServerAddress,
+          clientProtocolServerAddress, datanodeProtocolServerAddress,
+          raftGroup, selfPeerId, datanodeAddressKey,
+          blockProtocolServerAddressKey, clientProtocolServerAddressKey);
+    }
+  }
+
+  public String getRpcAddressString() {
+    return NetUtils.getHostPortString(getRpcAddress());
+  }
+
+  public InetSocketAddress getClientProtocolServerAddress() {
+    return clientProtocolServerAddress;
+  }
+
+  public String getClientProtocolServerAddressKey() {
+    return clientProtocolServerAddressKey;
+  }
+
+  public InetSocketAddress getBlockProtocolServerAddress() {
+    return blockProtocolServerAddress;
+  }
+
+  public String getBlockProtocolServerAddressKey() {
+    return blockProtocolServerAddressKey;
+  }
+
+  public InetSocketAddress getDatanodeProtocolServerAddress() {
+    return datanodeProtocolServerAddress;
+  }
+
+  public String getDatanodeAddressKey() {
+    return datanodeAddressKey;
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java
new file mode 100644
index 0000000..da8fadf
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import org.apache.hadoop.hdds.scm.ha.io.CodecFactory;
+import org.apache.ratis.protocol.Message;
+
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.Method;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.MethodArgument;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.SCMRatisRequestProto;
+
+
+/**
+ * Represents the request that is sent to RatisServer.
+ */
+public final class SCMRatisRequest {
+
+  private final RequestType type;
+  private final String operation;
+  private final Object[] arguments;
+  private final Class<?>[] parameterTypes;
+
+  private SCMRatisRequest(final RequestType type, final String operation,
+      final Class<?>[] parameterTypes, final Object... arguments) {
+    this.type = type;
+    this.operation = operation;
+    this.parameterTypes = parameterTypes;
+    this.arguments = arguments;
+  }
+
+  public static SCMRatisRequest of(final RequestType type,
+      final String operation,
+      final Class<?>[] parameterTypes,
+      final Object... arguments) {
+    Preconditions.checkState(parameterTypes.length == arguments.length);
+    return new SCMRatisRequest(type, operation, parameterTypes, arguments);
+  }
+
+  /**
+   * Returns the type of request.
+   */
+  public RequestType getType() {
+    return type;
+  }
+
+  /**
+   * Returns the operation that this request represents.
+   */
+  public String getOperation() {
+    return operation;
+  }
+
+  /**
+   * Returns the arguments encoded in the request.
+   */
+  public Object[] getArguments() {
+    return arguments.clone();
+  }
+
+  public Class<?>[] getParameterTypes() {
+    return parameterTypes.clone();
+  }
+  /**
+   * Encodes the request into Ratis Message.
+   */
+  public Message encode() throws InvalidProtocolBufferException {
+    final SCMRatisRequestProto.Builder requestProtoBuilder =
+        SCMRatisRequestProto.newBuilder();
+    requestProtoBuilder.setType(type);
+
+    final Method.Builder methodBuilder = Method.newBuilder();
+    methodBuilder.setName(operation);
+
+    final List<MethodArgument> args = new ArrayList<>();
+
+    int paramCounter = 0;
+    for (Object argument : arguments) {
+      final MethodArgument.Builder argBuilder = MethodArgument.newBuilder();
+      // Set actual method parameter type, not actual argument type.
+      // This is done to avoid MethodNotFoundException in case if argument is
+      // subclass type, where as method is defined with super class type.
+      argBuilder.setType(parameterTypes[paramCounter++].getName());
+      argBuilder.setValue(CodecFactory.getCodec(argument.getClass())
+          .serialize(argument));
+      args.add(argBuilder.build());
+    }
+    methodBuilder.addAllArgs(args);
+    requestProtoBuilder.setMethod(methodBuilder.build());
+    return Message.valueOf(
+        org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom(
+            requestProtoBuilder.build().toByteArray()));
+  }
+
+  /**
+   * Decodes the request from Ratis Message.
+   */
+  public static SCMRatisRequest decode(Message message)
+      throws InvalidProtocolBufferException {
+    final SCMRatisRequestProto requestProto =
+        SCMRatisRequestProto.parseFrom(message.getContent().toByteArray());
+    final Method method = requestProto.getMethod();
+    List<Object> args = new ArrayList<>();
+    Class<?>[] parameterTypes = new Class[method.getArgsCount()];
+    int paramCounter = 0;
+    for (MethodArgument argument : method.getArgsList()) {
+      try {
+        final Class<?> clazz = ReflectionUtil.getClass(argument.getType());
+        parameterTypes[paramCounter++] = clazz;
+        args.add(CodecFactory.getCodec(clazz)
+            .deserialize(clazz, argument.getValue()));
+      } catch (ClassNotFoundException ex) {
+        throw new InvalidProtocolBufferException(argument.getType() +
+            " cannot be decoded!" + ex.getMessage());
+      }
+    }
+    return new SCMRatisRequest(requestProto.getType(),
+        method.getName(), parameterTypes, args.toArray());
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java
new file mode 100644
index 0000000..15163bf
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.SCMRatisResponseProto;
+import org.apache.hadoop.hdds.scm.ha.io.CodecFactory;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientReply;
+
+/**
+ * Represents the response from RatisServer.
+ */
+public final class SCMRatisResponse {
+
+  private final boolean success;
+  private final Object result;
+  private final Exception exception;
+
+  private SCMRatisResponse() {
+    this(true, null, null);
+  }
+
+  private SCMRatisResponse(final Object result) {
+    this(true, result, null);
+  }
+
+  private SCMRatisResponse(final Exception exception) {
+    this(false, null, exception);
+  }
+
+  private SCMRatisResponse(final boolean success, final Object result,
+                           final Exception exception) {
+    this.success = success;
+    this.result = result;
+    this.exception = exception;
+  }
+
+  public boolean isSuccess() {
+    return success;
+  }
+
+  public Object getResult() {
+    return result;
+  }
+
+  public Exception getException() {
+    return exception;
+  }
+
+  public static Message encode(final Object result)
+      throws InvalidProtocolBufferException {
+
+    if (result == null) {
+      return Message.EMPTY;
+    }
+
+    final Class<?> type = result.getClass();
+    final ByteString value = CodecFactory.getCodec(type).serialize(result);
+
+    final SCMRatisResponseProto response = SCMRatisResponseProto.newBuilder()
+        .setType(type.getName()).setValue(value).build();
+    return Message.valueOf(
+        org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom(
+            response.toByteArray()));
+  }
+
+  public static SCMRatisResponse decode(RaftClientReply reply)
+      throws InvalidProtocolBufferException {
+    if (!reply.isSuccess()) {
+      return new SCMRatisResponse(reply.getException());
+    }
+
+    final byte[] response = reply.getMessage().getContent().toByteArray();
+
+    if (response.length == 0) {
+      return new SCMRatisResponse();
+    }
+
+    final SCMRatisResponseProto responseProto = SCMRatisResponseProto
+        .parseFrom(response);
+
+    try {
+      final Class<?> type = ReflectionUtil.getClass(responseProto.getType());
+      return new SCMRatisResponse(CodecFactory.getCodec(type)
+          .deserialize(type, responseProto.getValue()));
+    } catch (ClassNotFoundException e) {
+      throw new InvalidProtocolBufferException(responseProto.getType() +
+          " cannot be decoded!" + e.getMessage());
+    }
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java
new file mode 100644
index 0000000..b351c86
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.apache.ratis.server.RaftServer;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * TODO.
+ */
+public interface SCMRatisServer {
+
+  void start() throws IOException;
+
+  void registerStateMachineHandler(RequestType handlerType, Object handler);
+
+  SCMRatisResponse submitRequest(SCMRatisRequest request)
+      throws IOException, ExecutionException, InterruptedException;
+
+  void stop() throws IOException;
+
+  RaftServer.Division getDivision();
+
+  /**
+   * Returns roles of ratis peers.
+   */
+  List<String> getRatisRoles();
+
+  /**
+   * Returns NotLeaderException with useful info.
+   */
+  NotLeaderException triggerNotLeaderException();
+
+  boolean addSCM(AddSCMRequest request) throws IOException;
+
+  SCMStateMachine getSCMStateMachine();
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
new file mode 100644
index 0000000..79da583
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.Iterator;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.conf.RaftProperties;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftClientRequest;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.apache.ratis.protocol.SetConfigurationRequest;
+import org.apache.ratis.server.RaftServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * TODO.
+ */
+public class SCMRatisServerImpl implements SCMRatisServer {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMRatisServerImpl.class);
+
+  private final RaftServer server;
+  private final SCMStateMachine stateMachine;
+  private final StorageContainerManager scm;
+  private final ClientId clientId = ClientId.randomId();
+  private final AtomicLong callId = new AtomicLong();
+  private final RaftServer.Division division;
+
+  // TODO: Refactor and remove ConfigurationSource and use only
+  //  SCMHAConfiguration.
+  SCMRatisServerImpl(final ConfigurationSource conf,
+      final StorageContainerManager scm, final SCMHADBTransactionBuffer buffer)
+      throws IOException {
+    this.scm = scm;
+    this.stateMachine = new SCMStateMachine(scm, this, buffer);
+    final RaftGroupId groupId = buildRaftGroupId(scm.getClusterId());
+    LOG.info("starting Raft server for scm:{}", scm.getScmId());
+    // During SCM startup, the bootstrapped node will be started just with
+    // groupId information, so that it won't trigger any leader election
+    // as it doesn't have any peer info.
+
+    // The primary SCM node which is initialized using scm --init command,
+    // will initialize the raft server with the peer info and it will be
+    // persisted in the raft log post leader election. Now, when the primary
+    // scm boots up, it has peer info embedded in the raft log and will
+    // trigger leader election.
+    this.server =
+        newRaftServer(scm.getScmId(), conf).setStateMachine(stateMachine)
+            .setGroup(RaftGroup.valueOf(groupId)).build();
+    this.division = server.getDivision(groupId);
+  }
+
+  public static void initialize(String clusterId, String scmId,
+      SCMNodeDetails details, OzoneConfiguration conf) throws IOException {
+    final RaftGroup group = buildRaftGroup(details, scmId, clusterId);
+    RaftServer server = null;
+    try {
+      server = newRaftServer(scmId, conf).setGroup(group).build();
+      server.start();
+      waitForLeaderToBeReady(server, conf, group);
+    } finally {
+      if (server != null) {
+        server.close();
+      }
+    }
+  }
+
+  public static void reinitialize(String clusterId, String scmId,
+      SCMNodeDetails details, OzoneConfiguration conf) throws IOException {
+    RaftServer server = null;
+    try {
+      server = newRaftServer(scmId, conf).build();
+      RaftGroup group = null;
+      Iterator<RaftGroup> iter = server.getGroups().iterator();
+      if (iter.hasNext()) {
+        group = iter.next();
+      }
+      if (group != null && group.getGroupId()
+          .equals(buildRaftGroupId(clusterId))) {
+        LOG.info("Ratis group with group Id {} already exists.",
+            group.getGroupId());
+        return;
+      } else {
+        // close the server instance so that pending locks on raft storage
+        // directory gets released if any and further initiliaze can succeed.
+        server.close();
+        initialize(clusterId, scmId, details, conf);
+      }
+    } finally {
+      if (server != null) {
+        server.close();
+      }
+    }
+  }
+
+  private static void waitForLeaderToBeReady(RaftServer server,
+      OzoneConfiguration conf, RaftGroup group) throws IOException {
+    boolean ready;
+    long st = Time.monotonicNow();
+    final SCMHAConfiguration haConf = conf.getObject(SCMHAConfiguration.class);
+    long waitTimeout = haConf.getLeaderReadyWaitTimeout();
+    long retryInterval = haConf.getLeaderReadyCheckInterval();
+
+    do {
+      ready = server.getDivision(group.getGroupId()).getInfo().isLeaderReady();
+      if (!ready) {
+        try {
+          Thread.sleep(retryInterval);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+        }
+      }
+    } while (!ready && Time.monotonicNow() - st < waitTimeout);
+
+    if (!ready) {
+      throw new IOException(String
+          .format("Ratis group %s is not ready in %d ms", group.getGroupId(),
+                  waitTimeout));
+    }
+  }
+
+  private static RaftServer.Builder newRaftServer(final String scmId,
+      final ConfigurationSource conf) {
+    final SCMHAConfiguration haConf = conf.getObject(SCMHAConfiguration.class);
+    final RaftProperties serverProperties =
+        RatisUtil.newRaftProperties(haConf, conf);
+    return RaftServer.newBuilder().setServerId(RaftPeerId.getRaftPeerId(scmId))
+        .setProperties(serverProperties)
+        .setStateMachine(new SCMStateMachine());
+  }
+
+  @Override
+  public void start() throws IOException {
+    LOG.info("starting ratis server {}", server.getPeer().getAddress());
+    server.start();
+  }
+
+  @Override
+  public RaftServer.Division getDivision() {
+    return division;
+  }
+
+  @VisibleForTesting
+  public SCMStateMachine getStateMachine() {
+    return stateMachine;
+  }
+
+  @Override
+  public SCMStateMachine getSCMStateMachine() {
+    return stateMachine;
+  }
+
+  @Override
+  public void registerStateMachineHandler(final RequestType handlerType,
+                                          final Object handler) {
+    stateMachine.registerHandler(handlerType, handler);
+  }
+
+  @Override
+  public SCMRatisResponse submitRequest(SCMRatisRequest request)
+      throws IOException, ExecutionException, InterruptedException {
+    final RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder()
+        .setClientId(clientId)
+        .setServerId(getDivision().getId())
+        .setGroupId(getDivision().getGroup().getGroupId())
+        .setCallId(nextCallId())
+        .setMessage(request.encode())
+        .setType(RaftClientRequest.writeRequestType())
+        .build();
+    final RaftClientReply raftClientReply =
+        server.submitClientRequestAsync(raftClientRequest).get();
+    if (LOG.isDebugEnabled()) {
+      LOG.info("request {} Reply {}", raftClientRequest, raftClientReply);
+    }
+    return SCMRatisResponse.decode(raftClientReply);
+  }
+
+  private long nextCallId() {
+    return callId.getAndIncrement() & Long.MAX_VALUE;
+  }
+
+  @Override
+  public void stop() throws IOException {
+    LOG.info("stopping ratis server {}", server.getPeer().getAddress());
+    server.close();
+  }
+
+  @Override
+  public List<String> getRatisRoles() {
+    return division.getGroup().getPeers().stream()
+        .map(peer -> peer.getAddress() == null ? "" : peer.getAddress())
+        .collect(Collectors.toList());
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public NotLeaderException triggerNotLeaderException() {
+    return new NotLeaderException(
+        division.getMemberId(), null, division.getGroup().getPeers());
+  }
+
+  @Override
+  public boolean addSCM(AddSCMRequest request) throws IOException {
+    List<RaftPeer> newRaftPeerList =
+        new ArrayList<>(getDivision().getGroup().getPeers());
+    // add the SCM node to be added to the raft peer list
+
+    RaftPeer raftPeer = RaftPeer.newBuilder().setId(request.getScmId())
+        .setAddress(request.getRatisAddr()).build();
+    newRaftPeerList.add(raftPeer);
+
+    LOG.info("{}: Submitting SetConfiguration request to Ratis server with" +
+            " new SCM peers list: {}", scm.getScmId(),
+        newRaftPeerList);
+    SetConfigurationRequest configRequest =
+        new SetConfigurationRequest(clientId, division.getPeer().getId(),
+            division.getGroup().getGroupId(), nextCallId(), newRaftPeerList);
+
+    try {
+      RaftClientReply raftClientReply =
+          division.getRaftServer().setConfiguration(configRequest);
+      if (raftClientReply.isSuccess()) {
+        LOG.info("Successfully added new SCM: {}.", request.getScmId());
+      } else {
+        LOG.error("Failed to add new SCM: {}. Ratis reply: {}" +
+            request.getScmId(), raftClientReply);
+        throw new IOException(raftClientReply.getException());
+      }
+      return raftClientReply.isSuccess();
+    } catch (IOException e) {
+      LOG.error("Failed to update Ratis configuration and add new peer. " +
+          "Cannot add new SCM: {}.", scm.getScmId(), e);
+      throw e;
+    }
+  }
+
+  private static RaftGroup buildRaftGroup(SCMNodeDetails details,
+      String scmId, String clusterId) {
+    Preconditions.checkNotNull(scmId);
+    final RaftGroupId groupId = buildRaftGroupId(clusterId);
+    RaftPeerId selfPeerId = RaftPeerId.getRaftPeerId(scmId);
+
+    RaftPeer localRaftPeer = RaftPeer.newBuilder().setId(selfPeerId)
+        // TODO : Should we use IP instead of hostname??
+        .setAddress(details.getRatisHostPortStr()).build();
+
+    List<RaftPeer> raftPeers = new ArrayList<>();
+    // Add this Ratis server to the Ratis ring
+    raftPeers.add(localRaftPeer);
+    final RaftGroup group =
+        RaftGroup.valueOf(groupId, raftPeers);
+    return group;
+  }
+
+  @VisibleForTesting
+  public static RaftGroupId buildRaftGroupId(String clusterId) {
+    Preconditions.checkNotNull(clusterId);
+    return RaftGroupId.valueOf(
+        UUID.fromString(clusterId.replace(OzoneConsts.CLUSTER_ID_PREFIX, "")));
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMService.java
new file mode 100644
index 0000000..4d7c435
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMService.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+/**
+ * Interface for background services in SCM, including ReplicationManager,
+ * SCMBlockDeletingService and BackgroundPipelineCreator.
+ *
+ * Provide a fine-grained method to manipulate the status of these background
+ * services.
+ */
+public interface SCMService {
+  /**
+   * Notify raft or safe mode related status changed.
+   */
+  void notifyStatusChanged();
+
+  /**
+   * @param event latest triggered event.
+   */
+  default void notifyEventTriggered(Event event) {
+  }
+
+  /**
+   * @return true, if next iteration of Service should take effect,
+   *         false, if next iteration of Service should be skipped.
+   */
+  boolean shouldRun();
+
+  /**
+   * @return name of the Service.
+   */
+  String getServiceName();
+
+  /**
+   * Status of Service.
+   */
+  enum ServiceStatus {
+    RUNNING,
+    PAUSING
+  }
+
+  /**
+   * One time event.
+   */
+  enum Event {
+    PRE_CHECK_COMPLETED,
+    NEW_NODE_HANDLER_TRIGGERED,
+    UNHEALTHY_TO_HEALTHY_NODE_HANDLER_TRIGGERED
+  }
+
+  /**
+   * starts the SCM service.
+   */
+  void start();
+
+  /**
+   * stops the SCM service.
+   */
+  void stop();
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
new file mode 100644
index 0000000..4fbd811
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMServiceManager.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hdds.scm.ha.SCMService.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Manipulate background services in SCM, including ReplicationManager,
+ * SCMBlockDeletingService and BackgroundPipelineCreator.
+ */
+public final class SCMServiceManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMServiceManager.class);
+
+  private final List<SCMService> services = new ArrayList<>();
+
+  /**
+   * Register a SCMService to SCMServiceManager.
+   */
+  public synchronized void register(SCMService service) {
+    Preconditions.checkNotNull(service);
+    LOG.info("Registering service {}.", service.getServiceName());
+    services.add(service);
+  }
+
+  /**
+   * Notify raft or safe mode related status changed.
+   */
+  public synchronized void notifyStatusChanged() {
+    for (SCMService service : services) {
+      LOG.debug("Notify service:{}.", service.getServiceName());
+      service.notifyStatusChanged();
+    }
+  }
+
+  /**
+   * Notify event triggered, which may affect SCMService.
+   */
+  public synchronized void notifyEventTriggered(Event event) {
+    for (SCMService service : services) {
+      LOG.debug("Notify service:{} with event:{}.",
+          service.getServiceName(), event);
+      service.notifyEventTriggered(event);
+    }
+  }
+
+  /**
+   * Starts all running services.
+   */
+  public synchronized void start() {
+    for (SCMService service : services) {
+      LOG.debug("Stopping service:{}.", service.getServiceName());
+      service.start();
+    }
+  }
+
+  /**
+   * Stops all running services.
+   */
+  public synchronized void stop() {
+    for (SCMService service : services) {
+      LOG.debug("Stopping service:{}.", service.getServiceName());
+      service.stop();
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java
new file mode 100644
index 0000000..7d5d3eb
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotDownloader.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * Contract to download a SCM Snapshot from remote server..
+ * <p>
+ *
+ * The underlying implementation is supposed to download SCM snapshot via
+ * any chosen protocol(for now its Grpc).
+ * images.
+ */
+public interface SCMSnapshotDownloader {
+
+  /**
+   * Downloads the contents to the target file path.
+   *
+   * @param destination
+   * @return Future task for download progress
+   * @throws IOException
+   */
+  CompletableFuture<Path> download(Path destination) throws IOException;
+
+  void close() throws Exception;
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotProvider.java
new file mode 100644
index 0000000..093b810
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMSnapshotProvider.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
+
+import org.apache.commons.io.FileUtils;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * SCMSnapshotProvider downloads the latest checkpoint from the
+ * leader SCM and loads the checkpoint into State Machine.
+ */
+public class SCMSnapshotProvider {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMSnapshotProvider.class);
+
+  private final File scmSnapshotDir;
+
+
+  private final ConfigurationSource conf;
+
+  private SCMSnapshotDownloader client;
+
+  private Map<String, SCMNodeDetails> peerNodesMap;
+
+  public SCMSnapshotProvider(ConfigurationSource conf,
+      List<SCMNodeDetails> peerNodes) {
+    LOG.info("Initializing SCM Snapshot Provider");
+    this.conf = conf;
+    // Create Ratis storage dir
+    String scmRatisDirectory = SCMHAUtils.getSCMRatisDirectory(conf);
+
+    if (scmRatisDirectory == null || scmRatisDirectory.isEmpty()) {
+      throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS +
+          " must be defined.");
+    }
+    HddsUtils.createDir(scmRatisDirectory);
+
+    // Create Ratis snapshot dir
+    scmSnapshotDir = HddsUtils.createDir(
+        SCMHAUtils.getSCMRatisSnapshotDirectory(conf));
+    if (peerNodes != null) {
+      this.peerNodesMap = new HashMap<>();
+      for (SCMNodeDetails peerNode : peerNodes) {
+        this.peerNodesMap.put(peerNode.getNodeId(), peerNode);
+      }
+    }
+    this.client = null;
+  }
+
+  @VisibleForTesting
+  public void setPeerNodesMap(Map<String, SCMNodeDetails> peerNodesMap) {
+    this.peerNodesMap = peerNodesMap;
+  }
+  /**
+   * Download the latest checkpoint from SCM Leader .
+   * @param leaderSCMNodeID leader SCM Node ID.
+   * @return the DB checkpoint (including the ratis snapshot index)
+   */
+  public DBCheckpoint getSCMDBSnapshot(String leaderSCMNodeID)
+      throws IOException {
+    String snapshotTime = Long.toString(System.currentTimeMillis());
+    String snapshotFileName =
+        OzoneConsts.SCM_DB_NAME + "-" + leaderSCMNodeID + "-" + snapshotTime;
+    String snapshotFilePath =
+        Paths.get(scmSnapshotDir.getAbsolutePath(), snapshotFileName).toFile()
+            .getAbsolutePath();
+    File targetFile = new File(snapshotFilePath + ".tar.gz");
+
+    // the client instance will be initialized only when first install snapshot
+    // notification from ratis leader will be received.
+    if (client == null) {
+      client = new InterSCMGrpcClient(
+          peerNodesMap.get(leaderSCMNodeID).getInetAddress().getHostAddress(),
+          conf);
+    }
+    try {
+      client.download(targetFile.toPath()).get();
+    } catch (InterruptedException | ExecutionException e) {
+      LOG.error("Rocks DB checkpoint downloading failed", e);
+      throw new IOException(e);
+    }
+
+
+    // Untar the checkpoint file.
+    Path untarredDbDir = Paths.get(snapshotFilePath);
+    FileUtil.unTar(targetFile, untarredDbDir.toFile());
+    FileUtils.deleteQuietly(targetFile);
+
+    LOG.info(
+        "Successfully downloaded latest checkpoint from leader SCM: {} path {}",
+        leaderSCMNodeID, untarredDbDir.toAbsolutePath());
+
+    RocksDBCheckpoint scmCheckpoint = new RocksDBCheckpoint(untarredDbDir);
+    return scmCheckpoint;
+  }
+
+  @VisibleForTesting
+  public File getScmSnapshotDir() {
+    return scmSnapshotDir;
+  }
+
+  public void stop() throws Exception {
+    if (client != null) {
+      client.close();
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
new file mode 100644
index 0000000..90ace48
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Collection;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImplV2;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.apache.ratis.proto.RaftProtos;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftGroupMemberId;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.server.RaftServer;
+import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.ratis.server.storage.RaftStorage;
+import org.apache.ratis.statemachine.SnapshotInfo;
+import org.apache.ratis.statemachine.StateMachine;
+import org.apache.ratis.statemachine.TransactionContext;
+import org.apache.ratis.statemachine.impl.BaseStateMachine;
+
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
+import org.apache.ratis.util.ExitUtils;
+import org.apache.ratis.util.LifeCycle;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.SCM_NOT_INITIALIZED;
+
+/**
+ * The SCMStateMachine is the state machine for SCMRatisServer. It is
+ * responsible for applying ratis committed transactions to
+ * {@link StorageContainerManager}.
+ */
+public class SCMStateMachine extends BaseStateMachine {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMStateMachine.class);
+
+  private StorageContainerManager scm;
+  private Map<RequestType, Object> handlers;
+  private SCMHADBTransactionBuffer transactionBuffer;
+  private final SimpleStateMachineStorage storage =
+      new SimpleStateMachineStorage();
+  private final boolean isInitialized;
+  private ExecutorService installSnapshotExecutor;
+
+  public SCMStateMachine(final StorageContainerManager scm,
+      final SCMRatisServer ratisServer, SCMHADBTransactionBuffer buffer)
+      throws SCMException {
+    this.scm = scm;
+    this.handlers = new EnumMap<>(RequestType.class);
+    this.transactionBuffer = buffer;
+    TransactionInfo latestTrxInfo = this.transactionBuffer.getLatestTrxInfo();
+    if (!latestTrxInfo.isDefault() &&
+        !updateLastAppliedTermIndex(latestTrxInfo.getTerm(),
+            latestTrxInfo.getTransactionIndex())) {
+      throw new SCMException(
+          String.format("Failed to update LastAppliedTermIndex " +
+                  "in StateMachine to term:{} index:{}",
+              latestTrxInfo.getTerm(), latestTrxInfo.getTransactionIndex()
+          ), SCM_NOT_INITIALIZED);
+    }
+    this.installSnapshotExecutor = HadoopExecutors.newSingleThreadExecutor();
+    isInitialized = true;
+  }
+
+  public SCMStateMachine() {
+    isInitialized = false;
+  }
+
+  public void registerHandler(RequestType type, Object handler) {
+    handlers.put(type, handler);
+  }
+
+  @Override
+  public SnapshotInfo getLatestSnapshot() {
+    // Transaction buffer will be null during scm initlialization phase
+    return transactionBuffer == null
+        ? null : transactionBuffer.getLatestSnapshot();
+  }
+
+  /**
+   * Initializes the State Machine with the given server, group and storage.
+   */
+  @Override
+  public void initialize(RaftServer server, RaftGroupId id,
+      RaftStorage raftStorage) throws IOException {
+    getLifeCycle().startAndTransition(() -> {
+      super.initialize(server, id, raftStorage);
+      storage.init(raftStorage);
+    });
+  }
+
+  @Override
+  public CompletableFuture<Message> applyTransaction(
+      final TransactionContext trx) {
+    final CompletableFuture<Message> applyTransactionFuture =
+        new CompletableFuture<>();
+    try {
+      final SCMRatisRequest request = SCMRatisRequest.decode(
+          Message.valueOf(trx.getStateMachineLogEntry().getLogData()));
+      applyTransactionFuture.complete(process(request));
+      transactionBuffer.updateLatestTrxInfo(TransactionInfo.builder()
+          .setCurrentTerm(trx.getLogEntry().getTerm())
+          .setTransactionIndex(trx.getLogEntry().getIndex())
+          .build());
+    } catch (Exception ex) {
+      applyTransactionFuture.completeExceptionally(ex);
+      ExitUtils.terminate(1, ex.getMessage(), ex, StateMachine.LOG);
+    }
+    return applyTransactionFuture;
+  }
+
+  private Message process(final SCMRatisRequest request) throws Exception {
+    try {
+      final Object handler = handlers.get(request.getType());
+
+      if (handler == null) {
+        throw new IOException("No handler found for request type " +
+            request.getType());
+      }
+
+      final Object result = handler.getClass().getMethod(
+          request.getOperation(), request.getParameterTypes())
+          .invoke(handler, request.getArguments());
+      return SCMRatisResponse.encode(result);
+    } catch (NoSuchMethodException | SecurityException ex) {
+      throw new InvalidProtocolBufferException(ex.getMessage());
+    } catch (InvocationTargetException e) {
+      final Exception targetEx = (Exception) e.getTargetException();
+      throw targetEx != null ? targetEx : e;
+    }
+  }
+
+  @Override
+  public void notifyNotLeader(Collection<TransactionContext> pendingEntries) {
+    if (!isInitialized) {
+      return;
+    }
+    LOG.info("current leader SCM steps down.");
+
+    scm.getScmContext().updateLeaderAndTerm(false, 0);
+    scm.getSCMServiceManager().notifyStatusChanged();
+  }
+
+  /**
+   * Leader SCM has purged entries from its log. To catch up, SCM must download
+   * the latest checkpoint from the leader SCM and install it.
+   * @param roleInfoProto the leader node information
+   * @param firstTermIndexInLog TermIndex of the first append entry available
+   *                           in the Leader's log.
+   * @return the last term index included in the installed snapshot.
+   */
+  @Override
+  public CompletableFuture<TermIndex> notifyInstallSnapshotFromLeader(
+      RaftProtos.RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) {
+
+    String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getFollowerInfo()
+        .getLeaderInfo().getId().getId()).toString();
+    LOG.info("Received install snapshot notification from SCM leader: {} with "
+        + "term index: {}", leaderNodeId, firstTermIndexInLog);
+
+    CompletableFuture<TermIndex> future = CompletableFuture.supplyAsync(
+        () -> scm.getScmHAManager().installSnapshotFromLeader(leaderNodeId),
+        installSnapshotExecutor);
+    return future;
+  }
+
+  @Override
+  public void notifyLeaderChanged(RaftGroupMemberId groupMemberId,
+                                  RaftPeerId newLeaderId) {
+    if (!isInitialized) {
+      return;
+    }
+    if (!groupMemberId.getPeerId().equals(newLeaderId)) {
+      LOG.info("leader changed, yet current SCM is still follower.");
+      return;
+    }
+
+    long term = scm.getScmHAManager()
+        .getRatisServer()
+        .getDivision()
+        .getInfo()
+        .getCurrentTerm();
+
+    LOG.info("current SCM becomes leader of term {}.", term);
+
+    scm.getScmContext().updateLeaderAndTerm(true, term);
+    scm.getSCMServiceManager().notifyStatusChanged();
+    scm.getSequenceIdGen().invalidateBatch();
+
+    DeletedBlockLog deletedBlockLog = scm.getScmBlockManager()
+        .getDeletedBlockLog();
+    Preconditions.checkArgument(
+        deletedBlockLog instanceof DeletedBlockLogImplV2);
+    ((DeletedBlockLogImplV2) deletedBlockLog).onBecomeLeader();
+  }
+
+  @Override
+  public long takeSnapshot() throws IOException {
+    TermIndex lastTermIndex = getLastAppliedTermIndex();
+    long lastAppliedIndex = lastTermIndex.getIndex();
+
+    if (!isInitialized) {
+      return lastAppliedIndex;
+    }
+
+    long startTime = Time.monotonicNow();
+
+    TransactionInfo latestTrxInfo = transactionBuffer.getLatestTrxInfo();
+    TransactionInfo lastAppliedTrxInfo =
+        TransactionInfo.fromTermIndex(lastTermIndex);
+
+    if (latestTrxInfo.compareTo(lastAppliedTrxInfo) < 0) {
+      transactionBuffer.updateLatestTrxInfo(lastAppliedTrxInfo);
+      transactionBuffer.setLatestSnapshot(lastAppliedTrxInfo.toSnapshotInfo());
+    } else {
+      lastAppliedIndex = latestTrxInfo.getTransactionIndex();
+    }
+
+    transactionBuffer.flush();
+
+    LOG.info("Current Snapshot Index {}, takeSnapshot took {} ms",
+        lastAppliedIndex, Time.monotonicNow() - startTime);
+    return lastAppliedIndex;
+  }
+
+  @Override
+  public void notifyTermIndexUpdated(long term, long index) {
+    if (transactionBuffer != null) {
+      transactionBuffer.updateLatestTrxInfo(
+          TransactionInfo.builder().setCurrentTerm(term)
+              .setTransactionIndex(index).build());
+    }
+    // We need to call updateLastApplied here because now in ratis when a
+    // node becomes leader, it is checking stateMachineIndex >=
+    // placeHolderIndex (when a node becomes leader, it writes a conf entry
+    // with some information like its peers and termIndex). So, calling
+    // updateLastApplied updates lastAppliedTermIndex.
+    updateLastAppliedTermIndex(term, index);
+  }
+
+  @Override
+  public void notifyConfigurationChanged(long term, long index,
+      RaftProtos.RaftConfigurationProto newRaftConfiguration) {
+  }
+
+  @Override
+  public void pause() {
+    getLifeCycle().transition(LifeCycle.State.PAUSING);
+    getLifeCycle().transition(LifeCycle.State.PAUSED);
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (!isInitialized) {
+      return;
+    }
+    super.close();
+    transactionBuffer.close();
+    HadoopExecutors.
+        shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS);
+  }
+  /**
+   * Unpause the StateMachine, re-initialize the DoubleBuffer and update the
+   * lastAppliedIndex. This should be done after uploading new state to the
+   * StateMachine.
+   */
+  public void unpause(long newLastAppliedSnapShotTerm,
+      long newLastAppliedSnapshotIndex) {
+    getLifeCycle().startAndTransition(() -> {
+      try {
+        transactionBuffer.init();
+        this.setLastAppliedTermIndex(TermIndex
+            .valueOf(newLastAppliedSnapShotTerm, newLastAppliedSnapshotIndex));
+      } catch (IOException ioe) {
+        LOG.error("Failed to unpause ", ioe);
+      }
+    });
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java
new file mode 100644
index 0000000..d223f93
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SequenceIdGenerator.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
+import org.apache.hadoop.hdds.utils.UniqueId;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+import java.time.LocalDate;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import static org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType.SEQUENCE_ID;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SEQUENCE_ID_BATCH_SIZE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SEQUENCE_ID_BATCH_SIZE_DEFAULT;
+
+/**
+ * After SCM starts, set lastId = 0, nextId = lastId + 1.
+ * The first getNextId() call triggers SCM to load lastId from rocksDB,
+ * and allocate a new batch.
+ *
+ * In order to maintain monotonicity, for Ratis based SequenceIdGen,
+ * when becoming leader, SCM invalidates un-exhausted id batch by setting
+ * nextId = lastId + 1, so that a new leader will reload lastId from
+ * rocksDB and allocate a new batch when receiving its first getNextId() call.
+ */
+public class SequenceIdGenerator {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SequenceIdGenerator.class);
+
+  /**
+   * Ids supported.
+   */
+  public static final String LOCAL_ID = "localId";
+  public static final String DEL_TXN_ID = "delTxnId";
+  public static final String CONTAINER_ID = "containerId";
+
+  private static final long INVALID_SEQUENCE_ID = 0;
+
+  static class Batch {
+    // The upper bound of the batch.
+    private long lastId = INVALID_SEQUENCE_ID;
+    // The next id to be allocated in this batch.
+    private long nextId = lastId + 1;
+  }
+
+  private final Map<String, Batch> sequenceIdToBatchMap;
+
+  private final Lock lock;
+  private final long batchSize;
+  private final StateManager stateManager;
+
+  /**
+   * @param conf            : conf
+   * @param scmhaManager    : scmhaManager
+   * @param sequenceIdTable : sequenceIdTable
+   */
+  public SequenceIdGenerator(ConfigurationSource conf,
+      SCMHAManager scmhaManager, Table<String, Long> sequenceIdTable) {
+    this.sequenceIdToBatchMap = new HashMap<>();
+    this.lock = new ReentrantLock();
+    this.batchSize = conf.getInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE,
+        OZONE_SCM_SEQUENCE_ID_BATCH_SIZE_DEFAULT);
+
+    Preconditions.checkNotNull(scmhaManager);
+    this.stateManager = new StateManagerImpl.Builder()
+        .setRatisServer(scmhaManager.getRatisServer())
+        .setDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
+        .setSequenceIdTable(sequenceIdTable).build();
+  }
+
+  /**
+   * @param sequenceIdName : name of the sequenceId
+   * @return : next id of this sequenceId.
+   */
+  public long getNextId(String sequenceIdName) {
+    lock.lock();
+    try {
+      Batch batch = sequenceIdToBatchMap.computeIfAbsent(
+          sequenceIdName, key -> new Batch());
+
+      if (batch.nextId <= batch.lastId) {
+        return batch.nextId++;
+      }
+
+      Preconditions.checkArgument(batch.nextId == batch.lastId + 1);
+      while (true) {
+        Long prevLastId = batch.lastId;
+        batch.nextId = prevLastId + 1;
+
+        Preconditions.checkArgument(Long.MAX_VALUE - batch.lastId >= batchSize);
+        batch.lastId += batchSize;
+
+        if (stateManager.allocateBatch(sequenceIdName,
+            prevLastId, batch.lastId)) {
+          LOG.info("Allocate a batch for {}, change lastId from {} to {}.",
+              sequenceIdName, prevLastId, batch.lastId);
+          break;
+        }
+
+        // reload lastId from RocksDB.
+        batch.lastId = stateManager.getLastId(sequenceIdName);
+      }
+
+      Preconditions.checkArgument(batch.nextId <= batch.lastId);
+      return batch.nextId++;
+
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Invalidate any un-exhausted batch, next getNextId() call will
+   * allocate a new batch.
+   */
+  public void invalidateBatch() {
+    lock.lock();
+    try {
+      sequenceIdToBatchMap.forEach(
+          (sequenceId, batch) -> batch.nextId = batch.lastId + 1);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Maintain SequenceIdTable in RocksDB.
+   */
+  interface StateManager {
+    /**
+     * Compare And Swap lastId saved in db from expectedLastId to newLastId.
+     * If based on Ratis, it will submit a raft client request.
+     *
+     * @param sequenceIdName : name of the sequence id.
+     * @param expectedLastId : the expected lastId saved in db
+     * @param newLastId      : the new lastId to save in db
+     * @return               : result of the C.A.S.
+     */
+    @Replicate
+    Boolean allocateBatch(String sequenceIdName,
+                          Long expectedLastId, Long newLastId);
+
+    /**
+     * @param sequenceIdName : name of the sequence id.
+     * @return lastId saved in db
+     */
+    Long getLastId(String sequenceIdName);
+  }
+
+  /**
+   * Ratis based StateManager, db operations are queued in
+   * DBTransactionBuffer until a snapshot is taken.
+   */
+  static final class StateManagerImpl implements StateManager {
+    private final Table<String, Long> sequenceIdTable;
+    private final DBTransactionBuffer transactionBuffer;
+    private final Map<String, Long> sequenceIdToLastIdMap;
+
+    private StateManagerImpl(Table<String, Long> sequenceIdTable,
+                               DBTransactionBuffer trxBuffer) {
+      this.sequenceIdTable = sequenceIdTable;
+      this.transactionBuffer = trxBuffer;
+      this.sequenceIdToLastIdMap = new ConcurrentHashMap<>();
+      LOG.info("Init the HA SequenceIdGenerator.");
+    }
+
+    @Override
+    public Boolean allocateBatch(String sequenceIdName,
+                                 Long expectedLastId, Long newLastId) {
+      Long lastId = sequenceIdToLastIdMap.computeIfAbsent(sequenceIdName,
+          key -> {
+            try {
+              Long idInDb = this.sequenceIdTable.get(key);
+              return idInDb != null ? idInDb : INVALID_SEQUENCE_ID;
+            } catch (IOException ioe) {
+              throw new RuntimeException("Failed to get lastId from db", ioe);
+            }
+          });
+
+      if (!lastId.equals(expectedLastId)) {
+        LOG.warn("Failed to allocate a batch for {}, expected lastId is {}," +
+            " actual lastId is {}.", sequenceIdName, expectedLastId, lastId);
+        return false;
+      }
+
+      try {
+        transactionBuffer
+            .addToBuffer(sequenceIdTable, sequenceIdName, newLastId);
+      } catch (IOException ioe) {
+        throw new RuntimeException("Failed to put lastId to Batch", ioe);
+      }
+
+      sequenceIdToLastIdMap.put(sequenceIdName, newLastId);
+      return true;
+    }
+
+    @Override
+    public Long getLastId(String sequenceIdName) {
+      return sequenceIdToLastIdMap.get(sequenceIdName);
+    }
+
+    /**
+     * Builder for Ratis based StateManager.
+     */
+    public static class Builder {
+      private Table<String, Long> table;
+      private DBTransactionBuffer buffer;
+      private SCMRatisServer ratisServer;
+
+      public Builder setRatisServer(final SCMRatisServer scmRatisServer) {
+        this.ratisServer = scmRatisServer;
+        return this;
+      }
+
+      public Builder setSequenceIdTable(
+          final Table<String, Long> sequenceIdTable) {
+        table = sequenceIdTable;
+        return this;
+      }
+
+      public Builder setDBTransactionBuffer(DBTransactionBuffer trxBuffer) {
+        buffer = trxBuffer;
+        return this;
+      }
+
+      public StateManager build() {
+        Preconditions.checkNotNull(table);
+        Preconditions.checkNotNull(buffer);
+
+        final StateManager impl = new StateManagerImpl(table, buffer);
+
+        final SCMHAInvocationHandler invocationHandler
+            = new SCMHAInvocationHandler(SEQUENCE_ID, impl, ratisServer);
+
+        return (StateManager) Proxy.newProxyInstance(
+            SCMHAInvocationHandler.class.getClassLoader(),
+            new Class<?>[]{StateManager.class},
+            invocationHandler);
+      }
+    }
+  }
+
+  /**
+   * TODO
+   *  Relocate the code after upgrade framework is ready.
+   *
+   * Upgrade localID, delTxnId, containerId from legacy solution
+   * to SequenceIdGenerator.
+   */
+  public static void upgradeToSequenceId(SCMMetadataStore scmMetadataStore)
+      throws IOException {
+    Table<String, Long> sequenceIdTable = scmMetadataStore.getSequenceIdTable();
+
+    // upgrade localId
+    // Short-term solution: when setup multi SCM from scratch, they need
+    // achieve an agreement upon the initial value of LOCAL_ID.
+    // Long-term solution: the bootstrapped SCM will explicitly download
+    // scm.db from leader SCM, and drop its own scm.db. Thus the upgrade
+    // operations can take effect exactly once in a SCM HA cluster.
+    if (sequenceIdTable.get(LOCAL_ID) == null) {
+      long millisSinceEpoch = TimeUnit.DAYS.toMillis(
+          LocalDate.of(LocalDate.now().getYear() + 1, 1, 1).toEpochDay());
+
+      long localId = millisSinceEpoch << Short.SIZE;
+      Preconditions.checkArgument(localId > UniqueId.next());
+
+      sequenceIdTable.put(LOCAL_ID, localId);
+      LOG.info("upgrade {} to {}", LOCAL_ID, sequenceIdTable.get(LOCAL_ID));
+    }
+
+    // upgrade delTxnId
+    if (sequenceIdTable.get(DEL_TXN_ID) == null) {
+      // fetch delTxnId from DeletedBlocksTXTable
+      // check HDDS-4477 for details.
+      DeletedBlocksTransaction txn
+          = scmMetadataStore.getDeletedBlocksTXTable().get(0L);
+      sequenceIdTable.put(DEL_TXN_ID, txn != null ? txn.getTxID() : 0L);
+      LOG.info("upgrade {} to {}", DEL_TXN_ID, sequenceIdTable.get(DEL_TXN_ID));
+    }
+
+    // upgrade containerId
+    if (sequenceIdTable.get(CONTAINER_ID) == null) {
+      long largestContainerId = 0;
+      TableIterator<ContainerID, ? extends KeyValue<ContainerID, ContainerInfo>>
+          iterator = scmMetadataStore.getContainerTable().iterator();
+      while (iterator.hasNext()) {
+        ContainerInfo containerInfo = iterator.next().getValue();
+        largestContainerId
+            = Long.max(containerInfo.getContainerID(), largestContainerId);
+      }
+      sequenceIdTable.put(CONTAINER_ID, largestContainerId);
+      LOG.info("upgrade {} to {}",
+          CONTAINER_ID, sequenceIdTable.get(CONTAINER_ID));
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java
new file mode 100644
index 0000000..d2a4423
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BigIntegerCodec.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+
+import java.math.BigInteger;
+
+/**
+ * Codec for type BigInteger.
+ */
+public class BigIntegerCodec implements Codec {
+
+  @Override
+  public ByteString serialize(Object object) {
+    return ByteString.copyFrom(((BigInteger)object).toByteArray());
+  }
+
+  @Override
+  public Object deserialize(Class< ? > type, ByteString value) {
+    return new BigInteger(value.toByteArray());
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java
new file mode 100644
index 0000000..0fb10d6
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/BooleanCodec.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class BooleanCodec implements Codec {
+  @Override
+  public ByteString serialize(Object object) {
+    return ByteString.copyFrom(((Boolean) object).toString().getBytes(UTF_8));
+  }
+
+  @Override
+  public Object deserialize(Class<?> type, ByteString value) {
+    return Boolean.parseBoolean(new String(value.toByteArray(), UTF_8));
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/Codec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/Codec.java
new file mode 100644
index 0000000..2e16376
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/Codec.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+public interface Codec {
+
+  ByteString serialize(Object object) throws InvalidProtocolBufferException;
+
+  Object deserialize(Class<?> type, ByteString value)
+      throws InvalidProtocolBufferException;
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
new file mode 100644
index 0000000..fefb983
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.ProtocolMessageEnum;
+
+import java.math.BigInteger;
+import java.security.cert.X509Certificate;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public final class CodecFactory {
+
+  private static Map<Class<?>, Codec> codecs = new HashMap<>();
+
+  static {
+    codecs.put(GeneratedMessage.class, new GeneratedMessageCodec());
+    codecs.put(ProtocolMessageEnum.class, new EnumCodec());
+    codecs.put(List.class, new ListCodec());
+    codecs.put(Long.class, new LongCodec());
+    codecs.put(String.class, new StringCodec());
+    codecs.put(Boolean.class, new BooleanCodec());
+    codecs.put(BigInteger.class, new BigIntegerCodec());
+    codecs.put(X509Certificate.class, new X509CertificateCodec());
+  }
+
+  private CodecFactory() {}
+
+  public static Codec getCodec(Class<?> type)
+      throws InvalidProtocolBufferException {
+    final List<Class<?>> classes = new ArrayList<>();
+    classes.add(type);
+    classes.add(type.getSuperclass());
+    classes.addAll(Arrays.asList(type.getInterfaces()));
+    for (Class<?> clazz : classes) {
+      if (codecs.containsKey(clazz)) {
+        return codecs.get(clazz);
+      }
+    }
+    throw new InvalidProtocolBufferException(
+        "Codec for " + type + " not found!");
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java
new file mode 100644
index 0000000..bca71ed
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/EnumCodec.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.common.primitives.Ints;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.ProtocolMessageEnum;
+import org.apache.hadoop.hdds.scm.ha.ReflectionUtil;
+
+import java.lang.reflect.InvocationTargetException;
+
+public class EnumCodec implements Codec {
+
+  @Override
+  public ByteString serialize(Object object)
+      throws InvalidProtocolBufferException {
+    return ByteString.copyFrom(Ints.toByteArray(
+        ((ProtocolMessageEnum) object).getNumber()));
+  }
+
+  @Override
+  public Object deserialize(Class<?> type, ByteString value)
+      throws InvalidProtocolBufferException {
+    try {
+      return ReflectionUtil.getMethod(type, "valueOf", int.class)
+          .invoke(null, Ints.fromByteArray(
+              value.toByteArray()));
+    } catch (NoSuchMethodException | IllegalAccessException
+        | InvocationTargetException ex) {
+      throw new InvalidProtocolBufferException(
+          "GeneratedMessage cannot be decoded!" + ex.getMessage());
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/GeneratedMessageCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/GeneratedMessageCodec.java
new file mode 100644
index 0000000..acfc719
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/GeneratedMessageCodec.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.scm.ha.ReflectionUtil;
+
+import java.lang.reflect.InvocationTargetException;
+
+public class GeneratedMessageCodec implements Codec {
+
+  @Override
+  public ByteString serialize(Object object) {
+    return ((GeneratedMessage)object).toByteString();
+  }
+
+  @Override
+  public GeneratedMessage deserialize(Class<?> type, ByteString value)
+      throws InvalidProtocolBufferException {
+    try {
+      return (GeneratedMessage) ReflectionUtil.getMethod(type,
+          "parseFrom", byte[].class)
+          .invoke(null, (Object) value.toByteArray());
+    } catch (NoSuchMethodException | IllegalAccessException
+        | InvocationTargetException ex) {
+      ex.printStackTrace();
+      throw new InvalidProtocolBufferException(
+          "GeneratedMessage cannot be decoded: " + ex.getMessage());
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java
new file mode 100644
index 0000000..0dbb1c0
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.ListArgument;
+import org.apache.hadoop.hdds.scm.ha.ReflectionUtil;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.List;
+
+public class ListCodec implements Codec {
+
+  @Override
+  public ByteString serialize(Object object)
+      throws InvalidProtocolBufferException {
+    final ListArgument.Builder listArgs = ListArgument.newBuilder();
+    final List<?> values = (List<?>) object;
+    if (!values.isEmpty()) {
+      Class<?> type = values.get(0).getClass();
+      listArgs.setType(type.getName());
+      for (Object value : values) {
+        listArgs.addValue(CodecFactory.getCodec(type).serialize(value));
+      }
+    } else {
+      listArgs.setType(Object.class.getName());
+    }
+    return listArgs.build().toByteString();
+  }
+
+  @Override
+  public Object deserialize(Class<?> type, ByteString value)
+      throws InvalidProtocolBufferException {
+    try {
+
+      List<Object> result = (List<Object>) type.newInstance();
+      final ListArgument listArgs = (ListArgument) ReflectionUtil
+          .getMethod(ListArgument.class, "parseFrom", byte[].class)
+          .invoke(null, (Object) value.toByteArray());
+      final Class<?> dataType = ReflectionUtil.getClass(listArgs.getType());
+      for (ByteString element : listArgs.getValueList()) {
+        result.add(CodecFactory.getCodec(dataType)
+            .deserialize(dataType, element));
+      }
+      return result;
+    } catch (InstantiationException | NoSuchMethodException |
+        IllegalAccessException | InvocationTargetException |
+        ClassNotFoundException ex) {
+      throw new InvalidProtocolBufferException(
+          "GeneratedMessage cannot be decoded: " + ex.getMessage());
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java
new file mode 100644
index 0000000..1a02dbd
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/LongCodec.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+public class LongCodec implements Codec {
+
+  @Override
+  public ByteString serialize(Object object)
+      throws InvalidProtocolBufferException {
+    return ByteString.copyFrom(Longs.toByteArray((Long) object));
+  }
+
+  @Override
+  public Object deserialize(Class<?> type, ByteString value)
+      throws InvalidProtocolBufferException {
+    return Longs.fromByteArray(value.toByteArray());
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java
new file mode 100644
index 0000000..fc66ee1
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/StringCodec.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+public class StringCodec implements Codec {
+  @Override
+  public ByteString serialize(Object object) {
+    return ByteString.copyFrom(((String) object).getBytes(UTF_8));
+  }
+
+  @Override
+  public Object deserialize(Class<?> type, ByteString value) {
+    return new String(value.toByteArray(), UTF_8);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java
new file mode 100644
index 0000000..9a24baa
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/X509CertificateCodec.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
+
+import java.security.cert.X509Certificate;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Codec for type X509Certificate.
+ */
+public class X509CertificateCodec implements Codec {
+  @Override
+  public ByteString serialize(Object object)
+      throws InvalidProtocolBufferException {
+    try {
+      String certString =
+          CertificateCodec.getPEMEncodedString((X509Certificate) object);
+      return ByteString.copyFrom(certString.getBytes(UTF_8));
+    } catch (Exception ex) {
+      throw new InvalidProtocolBufferException(
+          "X509Certificate cannot be decoded: " + ex.getMessage());
+    }
+  }
+
+  @Override
+  public Object deserialize(Class< ? > type, ByteString value)
+      throws InvalidProtocolBufferException {
+    try {
+      String pemEncodedCert = new String(value.toByteArray(), UTF_8);
+      return CertificateCodec.getX509Certificate(pemEncodedCert);
+    } catch (Exception ex) {
+      throw new InvalidProtocolBufferException(
+          "X509Certificate cannot be decoded: " + ex.getMessage());
+    }
+  }
+}
+
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/package-info.java
similarity index 87%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/package-info.java
index 4944017..718b76c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,8 +15,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
 
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ * This package contains classes related to SCM HA Serialization.
  */
+package org.apache.hadoop.hdds.scm.ha.io;
+
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
similarity index 88%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
index 4944017..06fe168 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java
@@ -15,8 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
+package org.apache.hadoop.hdds.scm.ha;
 
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ * This package contains classes related to SCM HA.
  */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
index 87c9e91..cb02e31 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
@@ -38,11 +38,11 @@
 
   @Override
   public ContainerID fromPersistedFormat(byte[] rawData) throws IOException {
-    return new ContainerID(longCodec.fromPersistedFormat(rawData));
+    return ContainerID.valueOf(longCodec.fromPersistedFormat(rawData));
   }
 
   @Override
   public ContainerID copyObject(ContainerID object) {
-    return new ContainerID(object.getId());
+    return ContainerID.valueOf(object.getId());
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java
index 838f117..8d02ec3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMDBDefinition.java
@@ -24,9 +24,11 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.security.x509.crl.CRLInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.hdds.utils.db.LongCodec;
@@ -56,6 +58,15 @@
           new X509CertificateCodec());
 
   public static final DBColumnFamilyDefinition<BigInteger, X509Certificate>
+      VALID_SCM_CERTS =
+      new DBColumnFamilyDefinition<>(
+          "validSCMCerts",
+          BigInteger.class,
+          new BigIntegerCodec(),
+          X509Certificate.class,
+          new X509CertificateCodec());
+
+  public static final DBColumnFamilyDefinition<BigInteger, X509Certificate>
       REVOKED_CERTS =
       new DBColumnFamilyDefinition<>(
           "revokedCerts",
@@ -82,6 +93,15 @@
           ContainerInfo.class,
           new ContainerInfoCodec());
 
+  public static final DBColumnFamilyDefinition<String, TransactionInfo>
+      TRANSACTIONINFO =
+      new DBColumnFamilyDefinition<>(
+          "scmTransactionInfos",
+          String.class,
+          new StringCodec(),
+          TransactionInfo.class,
+          new TransactionInfoCodec());
+
   public static final DBColumnFamilyDefinition<Long, CRLInfo> CRLS =
       new DBColumnFamilyDefinition<>(
           "crls",
@@ -99,6 +119,15 @@
           Long.class,
           new LongCodec());
 
+  public static final DBColumnFamilyDefinition<String, Long>
+      SEQUENCE_ID =
+      new DBColumnFamilyDefinition<>(
+          "sequenceId",
+          String.class,
+          new StringCodec(),
+          Long.class,
+          new LongCodec());
+
   @Override
   public String getName() {
     return "scm.db";
@@ -112,6 +141,7 @@
   @Override
   public DBColumnFamilyDefinition[] getColumnFamilies() {
     return new DBColumnFamilyDefinition[] {DELETED_BLOCKS, VALID_CERTS,
-        REVOKED_CERTS, PIPELINES, CONTAINERS, CRLS, CRL_SEQUENCE_ID};
+        VALID_SCM_CERTS, REVOKED_CERTS, PIPELINES, CONTAINERS, TRANSACTIONINFO,
+        CRLS, CRL_SEQUENCE_ID, SEQUENCE_ID};
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
index 9b3345e..618c4e1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.metadata;
 
+import java.io.File;
 import java.io.IOException;
 import java.math.BigInteger;
 import java.security.cert.X509Certificate;
@@ -25,6 +26,8 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
@@ -41,7 +44,13 @@
 import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.DELETED_BLOCKS;
 import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.PIPELINES;
 import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.REVOKED_CERTS;
+import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.TRANSACTIONINFO;
 import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.VALID_CERTS;
+import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.VALID_SCM_CERTS;
+import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.SEQUENCE_ID;
+import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER;
+
+import org.apache.ratis.util.ExitUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,16 +64,22 @@
 
   private Table<BigInteger, X509Certificate> validCertsTable;
 
+  private Table<BigInteger, X509Certificate> validSCMCertsTable;
+
   private Table<BigInteger, X509Certificate> revokedCertsTable;
 
   private Table<ContainerID, ContainerInfo> containerTable;
 
   private Table<PipelineID, Pipeline> pipelineTable;
 
+  private Table<String, TransactionInfo> transactionInfoTable;
+
   private Table<Long, CRLInfo> crlInfoTable;
 
   private Table<String, Long> crlSequenceIdTable;
 
+  private Table<String, Long> sequenceIdTable;
+
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMMetadataStoreImpl.class);
   private DBStore store;
@@ -87,6 +102,23 @@
       throws IOException {
     if (this.store == null) {
 
+      File metaDir = HAUtils.getMetaDir(new SCMDBDefinition(), configuration);
+      // Check if there is a DB Inconsistent Marker in the metaDir. This
+      // marker indicates that the DB is in an inconsistent state and hence
+      // the OM process should be terminated.
+      File markerFile = new File(metaDir, DB_TRANSIENT_MARKER);
+      if (markerFile.exists()) {
+        LOG.error("File {} marks that SCM DB is in an inconsistent state.",
+            markerFile);
+        // Note - The marker file should be deleted only after fixing the DB.
+        // In an HA setup, this can be done by replacing this DB with a
+        // checkpoint from another SCM.
+        String errorMsg = "Cannot load SCM DB as it is in an inconsistent " +
+            "state.";
+        ExitUtils.terminate(1, errorMsg, LOG);
+      }
+
+
       this.store = DBStoreBuilder.createDBStore(config, new SCMDBDefinition());
 
       deletedBlocksTable =
@@ -99,17 +131,33 @@
 
       checkTableStatus(validCertsTable, VALID_CERTS.getName());
 
+      validSCMCertsTable = VALID_SCM_CERTS.getTable(store);
+
+      checkTableStatus(validSCMCertsTable, VALID_SCM_CERTS.getName());
+
       revokedCertsTable = REVOKED_CERTS.getTable(store);
 
       checkTableStatus(revokedCertsTable, REVOKED_CERTS.getName());
 
       pipelineTable = PIPELINES.getTable(store);
 
+      checkTableStatus(pipelineTable, PIPELINES.getName());
+
       containerTable = CONTAINERS.getTable(store);
 
+      checkTableStatus(containerTable, CONTAINERS.getName());
+
+      transactionInfoTable = TRANSACTIONINFO.getTable(store);
+
+      checkTableStatus(transactionInfoTable, TRANSACTIONINFO.getName());
+
       crlInfoTable = CRLS.getTable(store);
 
       crlSequenceIdTable = CRL_SEQUENCE_ID.getTable(store);
+
+      sequenceIdTable = SEQUENCE_ID.getTable(store);
+
+      checkTableStatus(sequenceIdTable, SEQUENCE_ID.getName());
     }
   }
 
@@ -138,6 +186,11 @@
   }
 
   @Override
+  public Table<BigInteger, X509Certificate> getValidSCMCertsTable() {
+    return validSCMCertsTable;
+  }
+
+  @Override
   public Table<BigInteger, X509Certificate> getRevokedCertsTable() {
     return revokedCertsTable;
   }
@@ -182,6 +235,11 @@
   }
 
   @Override
+  public Table<String, TransactionInfo> getTransactionInfoTable() {
+    return transactionInfoTable;
+  }
+
+  @Override
   public BatchOperationHandler getBatchHandler() {
     return this.store;
   }
@@ -191,7 +249,10 @@
     return containerTable;
   }
 
-
+  @Override
+  public Table<String, Long> getSequenceIdTable() {
+    return sequenceIdTable;
+  }
 
   private void checkTableStatus(Table table, String name) throws IOException {
     String logMessage = "Unable to get a reference to %s table. Cannot " +
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
index b4fc28a..7f9b942 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerException;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -45,14 +45,14 @@
 
   private final NodeManager nodeManager;
   private final PipelineManager pipelineManager;
-  private final ContainerManager containerManager;
+  private final ContainerManagerV2 containerManager;
 
   private static final Logger LOG =
       LoggerFactory.getLogger(DeadNodeHandler.class);
 
   public DeadNodeHandler(final NodeManager nodeManager,
                          final PipelineManager pipelineManager,
-                         final ContainerManager containerManager) {
+                         final ContainerManagerV2 containerManager) {
     this.nodeManager = nodeManager;
     this.pipelineManager = pipelineManager;
     this.containerManager = containerManager;
@@ -101,7 +101,7 @@
         .ifPresent(pipelines ->
             pipelines.forEach(id -> {
               try {
-                pipelineManager.finalizeAndDestroyPipeline(
+                pipelineManager.closePipeline(
                     pipelineManager.getPipeline(id), false);
               } catch (PipelineNotFoundException ignore) {
                 // Pipeline is not there in pipeline manager,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
index f0f9b72..674cf2d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
@@ -21,6 +21,8 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -32,35 +34,39 @@
  * Handles New Node event.
  */
 public class NewNodeHandler implements EventHandler<DatanodeDetails> {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NewNodeHandler.class);
 
   private final PipelineManager pipelineManager;
   private final NodeDecommissionManager decommissionManager;
   private final ConfigurationSource conf;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(NewNodeHandler.class);
+  private final SCMServiceManager serviceManager;
 
   public NewNodeHandler(PipelineManager pipelineManager,
       NodeDecommissionManager decommissionManager,
-      ConfigurationSource conf) {
+      ConfigurationSource conf,
+      SCMServiceManager serviceManager) {
     this.pipelineManager = pipelineManager;
     this.decommissionManager = decommissionManager;
     this.conf = conf;
+    this.serviceManager = serviceManager;
   }
 
   @Override
   public void onMessage(DatanodeDetails datanodeDetails,
       EventPublisher publisher) {
-    pipelineManager.triggerPipelineCreation();
-    if (datanodeDetails.getPersistedOpState()
-        != HddsProtos.NodeOperationalState.IN_SERVICE) {
-      try {
+    try {
+      serviceManager.notifyEventTriggered(Event.NEW_NODE_HANDLER_TRIGGERED);
+
+      if (datanodeDetails.getPersistedOpState()
+          != HddsProtos.NodeOperationalState.IN_SERVICE) {
         decommissionManager.continueAdminForNode(datanodeDetails);
-      } catch (NodeNotFoundException e) {
-        // Should not happen, as the node has just registered to call this event
-        // handler.
-        LOG.warn("NodeNotFound when adding the node to the decommissionManager",
-            e);
       }
+    } catch (NodeNotFoundException e) {
+      // Should not happen, as the node has just registered to call this event
+      // handler.
+      LOG.warn("NodeNotFound when adding the node to the decommissionManager",
+          e);
     }
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index d2ee4e4..e56eb84 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -51,7 +51,7 @@
   private DatanodeAdminMonitor monitor;
 
   private NodeManager nodeManager;
-  //private ContainerManager containerManager;
+  //private ContainerManagerV2 containerManager;
   private EventPublisher eventQueue;
   private ReplicationManager replicationManager;
   private OzoneConfiguration conf;
@@ -171,7 +171,7 @@
   }
 
   public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm,
-      ContainerManager containerManager,
+      ContainerManagerV2 containerManager,
       EventPublisher eventQueue, ReplicationManager rm) {
     this.nodeManager = nm;
     conf = config;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
index cc32f84..d74f90f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
@@ -20,28 +20,34 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Handles Stale node event.
  */
 public class NonHealthyToHealthyNodeHandler
     implements EventHandler<DatanodeDetails> {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NonHealthyToHealthyNodeHandler.class);
 
-  private final PipelineManager pipelineManager;
   private final ConfigurationSource conf;
+  private final SCMServiceManager serviceManager;
 
   public NonHealthyToHealthyNodeHandler(
-      PipelineManager pipelineManager, OzoneConfiguration conf) {
-    this.pipelineManager = pipelineManager;
+      OzoneConfiguration conf,  SCMServiceManager serviceManager) {
     this.conf = conf;
+    this.serviceManager = serviceManager;
   }
 
   @Override
   public void onMessage(DatanodeDetails datanodeDetails,
       EventPublisher publisher) {
-    pipelineManager.triggerPipelineCreation();
+    serviceManager.notifyEventTriggered(
+        Event.UNHEALTHY_TO_HEALTHY_NODE_HANDLER_TRIGGERED);
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index b5ecaac..6c02bdd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -57,6 +58,7 @@
 import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -108,13 +110,14 @@
       new ConcurrentHashMap<>();
   private final int numPipelinesPerMetadataVolume;
   private final int heavyNodeCriteria;
+  private final SCMContext scmContext;
 
   /**
    * Constructs SCM machine Manager.
    */
   public SCMNodeManager(OzoneConfiguration conf,
       SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher,
-      NetworkTopology networkTopology) {
+      NetworkTopology networkTopology, SCMContext scmContext) {
     this.nodeStateManager = new NodeStateManager(conf, eventPublisher);
     this.version = VersionInfo.getLatestVersion();
     this.commandQueue = new CommandQueue();
@@ -140,6 +143,7 @@
             ScmConfigKeys.OZONE_SCM_PIPELINE_PER_METADATA_VOLUME_DEFAULT);
     String dnLimit = conf.get(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT);
     this.heavyNodeCriteria = dnLimit == null ? 0 : Integer.parseInt(dnLimit);
+    this.scmContext = scmContext;
   }
 
   private void registerMXBean() {
@@ -430,11 +434,18 @@
           scmStatus.getOperationalState(),
           scmStatus.getOpStateExpiryEpochSeconds());
 
-      onMessage(new CommandForDatanode(reportedDn.getUuid(),
-          new SetNodeOperationalStateCommand(
-              Time.monotonicNow(), scmStatus.getOperationalState(),
-              scmStatus.getOpStateExpiryEpochSeconds())
-      ), null);
+      try {
+        SCMCommand<?> command = new SetNodeOperationalStateCommand(
+            Time.monotonicNow(),
+            scmStatus.getOperationalState(),
+            scmStatus.getOpStateExpiryEpochSeconds());
+        command.setTerm(scmContext.getTermOfLeader());
+        addDatanodeCommand(reportedDn.getUuid(), command);
+      } catch (NotLeaderException nle) {
+        LOG.warn("Skip sending SetNodeOperationalStateCommand,"
+            + " since current SCM is not leader.", nle);
+        return;
+      }
     }
     DatanodeDetails scmDnd = nodeStateManager.getNode(reportedDn);
     scmDnd.setPersistedOpStateExpiryEpochSec(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
index 5530e73..dd8cea3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
@@ -60,7 +60,7 @@
     for (PipelineID pipelineID : pipelineIds) {
       try {
         Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
-        pipelineManager.finalizeAndDestroyPipeline(pipeline, true);
+        pipelineManager.closePipeline(pipeline, true);
       } catch (IOException e) {
         LOG.info("Could not finalize pipeline={} for dn={}", pipelineID,
             datanodeDetails);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java
index 9418a7a..c71b1c9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StartDatanodeAdminHandler.java
@@ -58,7 +58,7 @@
     for (PipelineID pipelineID : pipelineIds) {
       try {
         Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
-        pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+        pipelineManager.closePipeline(pipeline, false);
       } catch (IOException e) {
         LOG.info("Could not finalize pipeline={} for dn={}", pipelineID,
             datanodeDetails);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
index f240293..42b3a93 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
@@ -103,7 +103,7 @@
     }
   }
 
-  private void createPipelines() {
+  private void createPipelines() throws RuntimeException {
     // TODO: #CLUTIL Different replication factor may need to be supported
     HddsProtos.ReplicationType type = HddsProtos.ReplicationType.valueOf(
         conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java
new file mode 100644
index 0000000..41d3aa8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.collections.iterators.LoopingIterator;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMService;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.util.ExitUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import static org.apache.hadoop.hdds.scm.ha.SCMService.Event.UNHEALTHY_TO_HEALTHY_NODE_HANDLER_TRIGGERED;
+import static org.apache.hadoop.hdds.scm.ha.SCMService.Event.NEW_NODE_HANDLER_TRIGGERED;
+import static org.apache.hadoop.hdds.scm.ha.SCMService.Event.PRE_CHECK_COMPLETED;
+
+/**
+ * Implements api for running background pipeline creation jobs.
+ */
+public class BackgroundPipelineCreatorV2 implements SCMService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BackgroundPipelineCreator.class);
+
+  private final PipelineManager pipelineManager;
+  private final ConfigurationSource conf;
+  private final SCMContext scmContext;
+
+  /**
+   * SCMService related variables.
+   * 1) after leaving safe mode, BackgroundPipelineCreator needs to
+   *    wait for a while before really take effect.
+   * 2) NewNodeHandler, NonHealthyToHealthyNodeHandler, PreCheckComplete
+   *    will trigger a one-shot run of BackgroundPipelineCreator,
+   *    no matter in safe mode or not.
+   */
+  private final Lock serviceLock = new ReentrantLock();
+  private ServiceStatus serviceStatus = ServiceStatus.PAUSING;
+  private final boolean createPipelineInSafeMode;
+  private final long waitTimeInMillis;
+  private long lastTimeToBeReadyInMillis = 0;
+  private boolean oneShotRun = false;
+
+  /**
+   * RatisPipelineUtilsThread is the one which wakes up at
+   * configured interval and tries to create pipelines.
+   */
+  private Thread thread;
+  private final Object monitor = new Object();
+  private static final String THREAD_NAME = "RatisPipelineUtilsThread";
+  private final AtomicBoolean running = new AtomicBoolean(false);
+  private final long intervalInMillis;
+
+
+  BackgroundPipelineCreatorV2(PipelineManager pipelineManager,
+                              ConfigurationSource conf,
+                              SCMServiceManager serviceManager,
+                              SCMContext scmContext) {
+    this.pipelineManager = pipelineManager;
+    this.conf = conf;
+    this.scmContext = scmContext;
+
+    this.createPipelineInSafeMode = conf.getBoolean(
+        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION,
+        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION_DEFAULT);
+
+    this.waitTimeInMillis = conf.getTimeDuration(
+        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
+        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+
+    this.intervalInMillis = conf.getTimeDuration(
+        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL,
+        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT,
+        TimeUnit.MILLISECONDS);
+
+    // register BackgroundPipelineCreator to SCMServiceManager
+    serviceManager.register(this);
+
+    // start RatisPipelineUtilsThread
+    start();
+  }
+
+  /**
+   * Start RatisPipelineUtilsThread.
+   */
+  @Override
+  public void start() {
+    if (!running.compareAndSet(false, true)) {
+      LOG.warn("{} is already started, just ignore.", THREAD_NAME);
+      return;
+    }
+
+    LOG.info("Starting {}.", THREAD_NAME);
+
+    thread = new ThreadFactoryBuilder()
+        .setDaemon(false)
+        .setNameFormat(THREAD_NAME + " - %d")
+        .setUncaughtExceptionHandler((Thread t, Throwable ex) -> {
+          // gracefully shutdown SCM.
+          scmContext.getScm().stop();
+
+          String message = "Terminate SCM, encounter uncaught exception"
+              + " in RatisPipelineUtilsThread";
+          ExitUtils.terminate(1, message, ex, LOG);
+        })
+        .build()
+        .newThread(this::run);
+
+    thread.start();
+  }
+
+  /**
+   * Stop RatisPipelineUtilsThread.
+   */
+  public void stop() {
+    if (running.compareAndSet(true, false)) {
+      LOG.warn("{} is not running, just ignore.", THREAD_NAME);
+      return;
+    }
+
+    LOG.info("Stopping {}.", THREAD_NAME);
+
+    // in case RatisPipelineUtilsThread is sleeping
+    synchronized (monitor) {
+      monitor.notifyAll();
+    }
+
+    try {
+      thread.join();
+    } catch (InterruptedException e) {
+      LOG.warn("Interrupted during join {}.", THREAD_NAME);
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  private void run() {
+    while (running.get()) {
+      if (shouldRun()) {
+        createPipelines();
+      }
+
+      try {
+        synchronized (monitor) {
+          monitor.wait(intervalInMillis);
+        }
+      } catch (InterruptedException e) {
+        LOG.warn("{} is interrupted.", THREAD_NAME);
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  private boolean skipCreation(HddsProtos.ReplicationFactor factor,
+                               HddsProtos.ReplicationType type,
+                               boolean autoCreate) {
+    if (type == HddsProtos.ReplicationType.RATIS) {
+      return factor == HddsProtos.ReplicationFactor.ONE && (!autoCreate);
+    } else {
+      // For STAND_ALONE Replication Type, Replication Factor 3 should not be
+      // used.
+      return factor == HddsProtos.ReplicationFactor.THREE;
+    }
+  }
+
+  private void createPipelines() throws RuntimeException {
+    // TODO: #CLUTIL Different replication factor may need to be supported
+    HddsProtos.ReplicationType type = HddsProtos.ReplicationType.valueOf(
+        conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
+            OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
+    boolean autoCreateFactorOne = conf.getBoolean(
+        ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE,
+        ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE_DEFAULT);
+
+    List<HddsProtos.ReplicationFactor> list =
+        new ArrayList<>();
+    for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
+        .values()) {
+      if (skipCreation(factor, type, autoCreateFactorOne)) {
+        // Skip this iteration for creating pipeline
+        continue;
+      }
+      list.add(factor);
+      if (!pipelineManager.getSafeModeStatus()) {
+        try {
+          pipelineManager.scrubPipeline(type, factor);
+        } catch (IOException e) {
+          LOG.error("Error while scrubbing pipelines.", e);
+        }
+      }
+    }
+
+    LoopingIterator it = new LoopingIterator(list);
+    while (it.hasNext()) {
+      HddsProtos.ReplicationFactor factor =
+          (HddsProtos.ReplicationFactor) it.next();
+
+      try {
+        pipelineManager.createPipeline(type, factor);
+      } catch (IOException ioe) {
+        it.remove();
+      } catch (Throwable t) {
+        LOG.error("Error while creating pipelines", t);
+        it.remove();
+      }
+    }
+
+    LOG.debug("BackgroundPipelineCreator createPipelines finished.");
+  }
+
+  @Override
+  public void notifyStatusChanged() {
+    serviceLock.lock();
+    try {
+      // 1) SCMContext#isLeader returns true.
+      // 2) not in safe mode or createPipelineInSafeMode is true
+      if (scmContext.isLeader() &&
+          (!scmContext.isInSafeMode() || createPipelineInSafeMode)) {
+        // transition from PAUSING to RUNNING
+        if (serviceStatus != ServiceStatus.RUNNING) {
+          LOG.info("Service {} transitions to RUNNING.", getServiceName());
+          lastTimeToBeReadyInMillis = Time.monotonicNow();
+          serviceStatus = ServiceStatus.RUNNING;
+        }
+      } else {
+        serviceStatus = ServiceStatus.PAUSING;
+      }
+    } finally {
+      serviceLock.unlock();
+    }
+  }
+
+  @Override
+  public void notifyEventTriggered(Event event) {
+    if (!scmContext.isLeader()) {
+      LOG.info("ignore, not leader SCM.");
+      return;
+    }
+    if (event == NEW_NODE_HANDLER_TRIGGERED
+        || event == UNHEALTHY_TO_HEALTHY_NODE_HANDLER_TRIGGERED
+        || event == PRE_CHECK_COMPLETED) {
+      LOG.info("trigger a one-shot run on {}.", THREAD_NAME);
+      oneShotRun = true;
+
+      synchronized (monitor) {
+        monitor.notifyAll();
+      }
+    }
+  }
+
+  @Override
+  public boolean shouldRun() {
+    serviceLock.lock();
+    try {
+      // check one-short run
+      if (oneShotRun) {
+        oneShotRun = false;
+        return true;
+      }
+
+      // If safe mode is off, then this SCMService starts to run with a delay.
+      return serviceStatus == ServiceStatus.RUNNING && (
+          createPipelineInSafeMode ||
+          Time.monotonicNow() - lastTimeToBeReadyInMillis >= waitTimeInMillis);
+    } finally {
+      serviceLock.unlock();
+    }
+  }
+
+  @Override
+  public String getServiceName() {
+    return BackgroundPipelineCreator.class.getSimpleName();
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index 0720694..e33f256 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -23,12 +23,15 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
 
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,11 +47,13 @@
       LoggerFactory.getLogger(PipelineActionHandler.class);
 
   private final PipelineManager pipelineManager;
+  private final SCMContext scmContext;
   private final ConfigurationSource ozoneConf;
 
   public PipelineActionHandler(PipelineManager pipelineManager,
-      OzoneConfiguration conf) {
+      SCMContext scmContext, OzoneConfiguration conf) {
     this.pipelineManager = pipelineManager;
+    this.scmContext = scmContext;
     this.ozoneConf = conf;
   }
 
@@ -79,17 +84,24 @@
           info.getDetailedReason());
 
       if (action == PipelineAction.Action.CLOSE) {
-        pipelineManager.finalizeAndDestroyPipeline(
-            pipelineManager.getPipeline(pid), true);
+        pipelineManager.closePipeline(
+            pipelineManager.getPipeline(pid), false);
       } else {
         LOG.error("unknown pipeline action:{}", action);
       }
     } catch (PipelineNotFoundException e) {
       LOG.warn("Pipeline action {} received for unknown pipeline {}, " +
           "firing close pipeline event.", action, pid);
+      SCMCommand<?> command = new ClosePipelineCommand(pid);
+      try {
+        command.setTerm(scmContext.getTermOfLeader());
+      } catch (NotLeaderException nle) {
+        LOG.warn("Skip sending ClosePipelineCommand for pipeline {}," +
+            " since not leader SCM.", pid);
+        return;
+      }
       publisher.fireEvent(SCMEvents.DATANODE_COMMAND,
-          new CommandForDatanode<>(datanode.getUuid(),
-              new ClosePipelineCommand(pid)));
+          new CommandForDatanode<>(datanode.getUuid(), command));
     } catch (IOException ioe) {
       LOG.error("Could not execute pipeline action={} pipeline={}",
           action, pid, ioe);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
index e1cf382..ed73a64 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
@@ -39,14 +40,16 @@
 
   private Map<ReplicationType, PipelineProvider> providers;
 
-  PipelineFactory(NodeManager nodeManager, PipelineStateManager stateManager,
-      ConfigurationSource conf, EventPublisher eventPublisher) {
+  PipelineFactory(NodeManager nodeManager, StateManager stateManager,
+                  ConfigurationSource conf, EventPublisher eventPublisher,
+                  SCMContext scmContext) {
     providers = new HashMap<>();
     providers.put(ReplicationType.STAND_ALONE,
         new SimplePipelineProvider(nodeManager, stateManager));
     providers.put(ReplicationType.RATIS,
-        new RatisPipelineProvider(nodeManager, stateManager, conf,
-            eventPublisher));
+        new RatisPipelineProvider(nodeManager,
+            stateManager, conf,
+            eventPublisher, scmContext));
   }
 
   protected PipelineFactory() {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index 0cb905e..04985d4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -28,14 +28,13 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 
 /**
  * Interface which exposes the api for pipeline management.
  */
-public interface PipelineManager extends Closeable, PipelineManagerMXBean,
-    EventHandler<SafeModeStatus> {
+public interface PipelineManager extends Closeable, PipelineManagerMXBean {
 
   Pipeline createPipeline(ReplicationType type, ReplicationFactor factor)
       throws IOException;
@@ -55,7 +54,7 @@
       ReplicationFactor factor);
 
   List<Pipeline> getPipelines(ReplicationType type,
-      Pipeline.PipelineState state);
+      Pipeline.PipelineState state) throws NotLeaderException;
 
   List<Pipeline> getPipelines(ReplicationType type,
       ReplicationFactor factor, Pipeline.PipelineState state);
@@ -77,8 +76,7 @@
 
   void openPipeline(PipelineID pipelineId) throws IOException;
 
-  void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout)
-      throws IOException;
+  void closePipeline(Pipeline pipeline, boolean onTimeout) throws IOException;
 
   void scrubPipeline(ReplicationType type, ReplicationFactor factor)
       throws IOException;
@@ -125,4 +123,11 @@
    * @return boolean
    */
   boolean getSafeModeStatus();
+
+  /**
+   * Reinitialize the pipelineManager with the lastest pipeline store
+   * during SCM reload.
+   */
+  void reinitialize(Table<PipelineID, Pipeline> pipelineStore)
+      throws IOException;
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
index 6d7d717..57eab61 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 
 import java.util.Map;
 
@@ -33,6 +34,6 @@
    * Returns the number of pipelines in different state.
    * @return state to number of pipeline map
    */
-  Map<String, Integer> getPipelineInfo();
+  Map<String, Integer> getPipelineInfo() throws NotLeaderException;
 
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
new file mode 100644
index 0000000..eb0c948
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
@@ -0,0 +1,605 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.ozone.ClientVersions;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * SCM Pipeline Manager implementation.
+ * All the write operations for pipelines must come via PipelineManager.
+ * It synchronises all write and read operations via a ReadWriteLock.
+ */
+public class PipelineManagerV2Impl implements PipelineManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PipelineManagerV2Impl.class);
+
+  // Limit the number of on-going ratis operation to be 1.
+  private final Lock lock;
+  private PipelineFactory pipelineFactory;
+  private StateManager stateManager;
+  private BackgroundPipelineCreatorV2 backgroundPipelineCreator;
+  private final ConfigurationSource conf;
+  private final EventPublisher eventPublisher;
+  // Pipeline Manager MXBean
+  private ObjectName pmInfoBean;
+  private final SCMPipelineMetrics metrics;
+  private final long pipelineWaitDefaultTimeout;
+  private final SCMHAManager scmhaManager;
+  private final SCMContext scmContext;
+  private final NodeManager nodeManager;
+
+  protected PipelineManagerV2Impl(ConfigurationSource conf,
+                                 SCMHAManager scmhaManager,
+                                 NodeManager nodeManager,
+                                 StateManager pipelineStateManager,
+                                 PipelineFactory pipelineFactory,
+                                 EventPublisher eventPublisher,
+                                 SCMContext scmContext) {
+    this.lock = new ReentrantLock();
+    this.pipelineFactory = pipelineFactory;
+    this.stateManager = pipelineStateManager;
+    this.conf = conf;
+    this.scmhaManager = scmhaManager;
+    this.nodeManager = nodeManager;
+    this.eventPublisher = eventPublisher;
+    this.scmContext = scmContext;
+    this.pmInfoBean = MBeans.register("SCMPipelineManager",
+        "SCMPipelineManagerInfo", this);
+    this.metrics = SCMPipelineMetrics.create();
+    this.pipelineWaitDefaultTimeout = conf.getTimeDuration(
+        HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL,
+        HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT,
+        TimeUnit.MILLISECONDS);
+  }
+
+  public static PipelineManagerV2Impl newPipelineManager(
+      ConfigurationSource conf,
+      SCMHAManager scmhaManager,
+      NodeManager nodeManager,
+      Table<PipelineID, Pipeline> pipelineStore,
+      EventPublisher eventPublisher,
+      SCMContext scmContext,
+      SCMServiceManager serviceManager) throws IOException {
+    // Create PipelineStateManager
+    StateManager stateManager = PipelineStateManagerV2Impl
+        .newBuilder().setPipelineStore(pipelineStore)
+        .setRatisServer(scmhaManager.getRatisServer())
+        .setNodeManager(nodeManager)
+        .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
+        .build();
+
+    // Create PipelineFactory
+    PipelineFactory pipelineFactory = new PipelineFactory(
+        nodeManager, stateManager, conf, eventPublisher, scmContext);
+
+    // Create PipelineManager
+    PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf,
+        scmhaManager, nodeManager, stateManager, pipelineFactory,
+        eventPublisher, scmContext);
+
+    // Create background thread.
+    BackgroundPipelineCreatorV2 backgroundPipelineCreator =
+        new BackgroundPipelineCreatorV2(
+            pipelineManager, conf, serviceManager, scmContext);
+
+    pipelineManager.setBackgroundPipelineCreator(backgroundPipelineCreator);
+
+    return pipelineManager;
+  }
+
+  @Override
+  public Pipeline createPipeline(ReplicationType type,
+                                 ReplicationFactor factor) throws IOException {
+    if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) {
+      LOG.debug("Pipeline creation is not allowed until safe mode prechecks " +
+          "complete");
+      throw new IOException("Pipeline creation is not allowed as safe mode " +
+          "prechecks have not yet passed");
+    }
+    lock.lock();
+    try {
+      Pipeline pipeline = pipelineFactory.create(type, factor);
+      stateManager.addPipeline(pipeline.getProtobufMessage(
+          ClientVersions.CURRENT_VERSION));
+      recordMetricsForPipeline(pipeline);
+      return pipeline;
+    } catch (IOException ex) {
+      LOG.error("Failed to create pipeline of type {} and factor {}. " +
+          "Exception: {}", type, factor, ex.getMessage());
+      metrics.incNumPipelineCreationFailed();
+      throw ex;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @Override
+  public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor,
+                                 List<DatanodeDetails> nodes) {
+    // This will mostly be used to create dummy pipeline for SimplePipelines.
+    // We don't update the metrics for SimplePipelines.
+    return pipelineFactory.create(type, factor, nodes);
+  }
+
+  @Override
+  public Pipeline getPipeline(PipelineID pipelineID)
+      throws PipelineNotFoundException {
+    return stateManager.getPipeline(pipelineID);
+  }
+
+  @Override
+  public boolean containsPipeline(PipelineID pipelineID) {
+    try {
+      getPipeline(pipelineID);
+      return true;
+    } catch (PipelineNotFoundException e) {
+      return false;
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines() {
+    return stateManager.getPipelines();
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type) {
+    return stateManager.getPipelines(type);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type,
+                                     ReplicationFactor factor) {
+    return stateManager.getPipelines(type, factor);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type,
+                                     Pipeline.PipelineState state) {
+    return stateManager.getPipelines(type, state);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type,
+                                     ReplicationFactor factor,
+                                     Pipeline.PipelineState state) {
+    return stateManager.getPipelines(type, factor, state);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(
+      ReplicationType type, ReplicationFactor factor,
+      Pipeline.PipelineState state, Collection<DatanodeDetails> excludeDns,
+      Collection<PipelineID> excludePipelines) {
+    return stateManager
+        .getPipelines(type, factor, state, excludeDns, excludePipelines);
+  }
+
+  @Override
+  public void addContainerToPipeline(
+      PipelineID pipelineID, ContainerID containerID) throws IOException {
+    // should not lock here, since no ratis operation happens.
+    stateManager.addContainerToPipeline(pipelineID, containerID);
+  }
+
+  @Override
+  public void removeContainerFromPipeline(
+      PipelineID pipelineID, ContainerID containerID) throws IOException {
+    // should not lock here, since no ratis operation happens.
+    stateManager.removeContainerFromPipeline(pipelineID, containerID);
+  }
+
+  @Override
+  public NavigableSet<ContainerID> getContainersInPipeline(
+      PipelineID pipelineID) throws IOException {
+    return stateManager.getContainers(pipelineID);
+  }
+
+  @Override
+  public int getNumberOfContainers(PipelineID pipelineID) throws IOException {
+    return stateManager.getNumberOfContainers(pipelineID);
+  }
+
+  @Override
+  public void openPipeline(PipelineID pipelineId) throws IOException {
+    lock.lock();
+    try {
+      Pipeline pipeline = stateManager.getPipeline(pipelineId);
+      if (pipeline.isClosed()) {
+        throw new IOException("Closed pipeline can not be opened");
+      }
+      if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
+        LOG.info("Pipeline {} moved to OPEN state", pipeline);
+        stateManager.updatePipelineState(
+            pipelineId.getProtobuf(), HddsProtos.PipelineState.PIPELINE_OPEN);
+      }
+      metrics.incNumPipelineCreated();
+      metrics.createPerPipelineMetrics(pipeline);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Removes the pipeline from the db and pipeline state map.
+   *
+   * @param pipeline - pipeline to be removed
+   * @throws IOException
+   */
+  protected void removePipeline(Pipeline pipeline) throws IOException {
+    pipelineFactory.close(pipeline.getType(), pipeline);
+    PipelineID pipelineID = pipeline.getId();
+    lock.lock();
+    try {
+      stateManager.removePipeline(pipelineID.getProtobuf());
+      metrics.incNumPipelineDestroyed();
+    } catch (IOException ex) {
+      metrics.incNumPipelineDestroyFailed();
+      throw ex;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Fire events to close all containers related to the input pipeline.
+   * @param pipelineId - ID of the pipeline.
+   * @throws IOException
+   */
+  protected void closeContainersForPipeline(final PipelineID pipelineId)
+      throws IOException {
+    Set<ContainerID> containerIDs = stateManager.getContainers(pipelineId);
+    for (ContainerID containerID : containerIDs) {
+      eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
+    }
+  }
+
+  /**
+   * put pipeline in CLOSED state.
+   * @param pipeline - ID of the pipeline.
+   * @param onTimeout - whether to remove pipeline after some time.
+   * @throws IOException
+   */
+  @Override
+  public void closePipeline(Pipeline pipeline, boolean onTimeout)
+      throws IOException {
+    PipelineID pipelineID = pipeline.getId();
+    lock.lock();
+    try {
+      if (!pipeline.isClosed()) {
+        stateManager.updatePipelineState(pipelineID.getProtobuf(),
+            HddsProtos.PipelineState.PIPELINE_CLOSED);
+        LOG.info("Pipeline {} moved to CLOSED state", pipeline);
+      }
+      metrics.removePipelineMetrics(pipelineID);
+    } finally {
+      lock.unlock();
+    }
+    // close containers.
+    closeContainersForPipeline(pipelineID);
+    if (!onTimeout) {
+      // close pipeline right away.
+      removePipeline(pipeline);
+    }
+  }
+
+  /**
+   * Scrub pipelines.
+   * @param type Pipeline type
+   * @param factor Pipeline factor
+   * @throws IOException
+   */
+  @Override
+  public void scrubPipeline(ReplicationType type, ReplicationFactor factor)
+      throws IOException {
+    Instant currentTime = Instant.now();
+    Long pipelineScrubTimeoutInMills = conf.getTimeDuration(
+        ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT,
+        ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+
+    List<Pipeline> candidates = stateManager.getPipelines(type, factor);
+
+    for (Pipeline p : candidates) {
+      // scrub pipelines who stay ALLOCATED for too long.
+      if (p.getPipelineState() == Pipeline.PipelineState.ALLOCATED &&
+          (currentTime.toEpochMilli() - p.getCreationTimestamp()
+              .toEpochMilli() >= pipelineScrubTimeoutInMills)) {
+        LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
+            " since it stays at ALLOCATED stage for " +
+            Duration.between(currentTime, p.getCreationTimestamp())
+                .toMinutes() + " mins.");
+        closePipeline(p, false);
+      }
+      // scrub pipelines who stay CLOSED for too long.
+      if (p.getPipelineState() == Pipeline.PipelineState.CLOSED) {
+        LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
+            " since it stays at CLOSED stage.");
+        closeContainersForPipeline(p.getId());
+        removePipeline(p);
+      }
+    }
+    return;
+  }
+
+  /**
+   * Schedules a fixed interval job to create pipelines.
+   */
+  @Override
+  public void startPipelineCreator() {
+    throw new RuntimeException("Not supported in HA code.");
+  }
+
+  /**
+   * Triggers pipeline creation after the specified time.
+   */
+  @Override
+  public void triggerPipelineCreation() {
+    throw new RuntimeException("Not supported in HA code.");
+  }
+
+  @Override
+  public void incNumBlocksAllocatedMetric(PipelineID id) {
+    metrics.incNumBlocksAllocated(id);
+  }
+
+  @Override
+  public int minHealthyVolumeNum(Pipeline pipeline) {
+    return nodeManager.minHealthyVolumeNum(pipeline.getNodes());
+  }
+
+  @Override
+  public int minPipelineLimit(Pipeline pipeline) {
+    return nodeManager.minPipelineLimit(pipeline.getNodes());
+  }
+
+  /**
+   * Activates a dormant pipeline.
+   *
+   * @param pipelineID ID of the pipeline to activate.
+   * @throws IOException in case of any Exception
+   */
+  @Override
+  public void activatePipeline(PipelineID pipelineID)
+      throws IOException {
+    lock.lock();
+    try {
+      stateManager.updatePipelineState(pipelineID.getProtobuf(),
+              HddsProtos.PipelineState.PIPELINE_OPEN);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Deactivates an active pipeline.
+   *
+   * @param pipelineID ID of the pipeline to deactivate.
+   * @throws IOException in case of any Exception
+   */
+  @Override
+  public void deactivatePipeline(PipelineID pipelineID)
+      throws IOException {
+    lock.lock();
+    try {
+      stateManager.updatePipelineState(pipelineID.getProtobuf(),
+          HddsProtos.PipelineState.PIPELINE_DORMANT);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Wait a pipeline to be OPEN.
+   *
+   * @param pipelineID ID of the pipeline to wait for.
+   * @param timeout    wait timeout, millisecond, 0 to use default value
+   * @throws IOException in case of any Exception, such as timeout
+   */
+  @Override
+  public void waitPipelineReady(PipelineID pipelineID, long timeout)
+      throws IOException {
+    long st = Time.monotonicNow();
+    if (timeout == 0) {
+      timeout = pipelineWaitDefaultTimeout;
+    }
+
+    boolean ready;
+    Pipeline pipeline;
+    do {
+      try {
+        pipeline = stateManager.getPipeline(pipelineID);
+      } catch (PipelineNotFoundException e) {
+        throw new PipelineNotFoundException(String.format(
+            "Pipeline %s cannot be found", pipelineID));
+      }
+      ready = pipeline.isOpen();
+      if (!ready) {
+        try {
+          Thread.sleep((long)100);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+        }
+      }
+    } while (!ready && Time.monotonicNow() - st < timeout);
+
+    if (!ready) {
+      throw new IOException(String.format("Pipeline %s is not ready in %d ms",
+          pipelineID, timeout));
+    }
+  }
+
+  @Override
+  public Map<String, Integer> getPipelineInfo() throws NotLeaderException {
+    final Map<String, Integer> pipelineInfo = new HashMap<>();
+    for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) {
+      pipelineInfo.put(state.toString(), 0);
+    }
+    stateManager.getPipelines().forEach(pipeline ->
+        pipelineInfo.computeIfPresent(
+            pipeline.getPipelineState().toString(), (k, v) -> v + 1));
+    return pipelineInfo;
+  }
+
+  /**
+   * Get SafeMode status.
+   * @return boolean
+   */
+  @Override
+  public boolean getSafeModeStatus() {
+    return scmContext.isInSafeMode();
+  }
+
+  @Override
+  public void reinitialize(Table<PipelineID, Pipeline> pipelineStore)
+      throws IOException {
+    stateManager.reinitialize(pipelineStore);
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (backgroundPipelineCreator != null) {
+      backgroundPipelineCreator.stop();
+    }
+
+    if(pmInfoBean != null) {
+      MBeans.unregister(this.pmInfoBean);
+      pmInfoBean = null;
+    }
+
+    SCMPipelineMetrics.unRegister();
+
+    // shutdown pipeline provider.
+    pipelineFactory.shutdown();
+    try {
+      stateManager.close();
+    } catch (Exception ex) {
+      LOG.error("PipelineStateManager close failed", ex);
+    }
+  }
+
+  @VisibleForTesting
+  public boolean isPipelineCreationAllowed() {
+    return scmContext.isLeader() && scmContext.isPreCheckComplete();
+  }
+
+  @VisibleForTesting
+  public void setPipelineProvider(ReplicationType replicationType,
+                                  PipelineProvider provider) {
+    pipelineFactory.setProvider(replicationType, provider);
+  }
+
+  @VisibleForTesting
+  public StateManager getStateManager() {
+    return stateManager;
+  }
+
+  @VisibleForTesting
+  public SCMHAManager getScmhaManager() {
+    return scmhaManager;
+  }
+
+  private void setBackgroundPipelineCreator(
+      BackgroundPipelineCreatorV2 backgroundPipelineCreator) {
+    this.backgroundPipelineCreator = backgroundPipelineCreator;
+  }
+
+  @VisibleForTesting
+  public BackgroundPipelineCreatorV2 getBackgroundPipelineCreator() {
+    return this.backgroundPipelineCreator;
+  }
+
+  @VisibleForTesting
+  public PipelineFactory getPipelineFactory() {
+    return pipelineFactory;
+  }
+
+  private void recordMetricsForPipeline(Pipeline pipeline) {
+    metrics.incNumPipelineAllocated();
+    if (pipeline.isOpen()) {
+      metrics.incNumPipelineCreated();
+      metrics.createPerPipelineMetrics(pipeline);
+    }
+    switch (pipeline.getType()) {
+    case STAND_ALONE:
+      return;
+    case RATIS:
+      List<Pipeline> overlapPipelines = RatisPipelineUtils
+          .checkPipelineContainSameDatanodes(stateManager, pipeline);
+      if (!overlapPipelines.isEmpty()) {
+        // Count 1 overlap at a time.
+        metrics.incNumPipelineContainSameDatanodes();
+        //TODO remove until pipeline allocation is proved equally distributed.
+        for (Pipeline overlapPipeline : overlapPipelines) {
+          LOG.info("Pipeline: " + pipeline.getId().toString() +
+              " contains same datanodes as previous pipelines: " +
+              overlapPipeline.getId().toString() + " nodeIds: " +
+              pipeline.getNodes().get(0).getUuid().toString() +
+              ", " + pipeline.getNodes().get(1).getUuid().toString() +
+              ", " + pipeline.getNodes().get(2).getUuid().toString());
+        }
+      }
+      return;
+    case CHAINED:
+      // Not supported.
+    default:
+      // Not supported.
+      return;
+    }
+  }
+
+  protected Lock getLock() {
+    return lock;
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 3e44b51..0482085 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -52,7 +52,7 @@
   static final Logger LOG =
       LoggerFactory.getLogger(PipelinePlacementPolicy.class);
   private final NodeManager nodeManager;
-  private final PipelineStateManager stateManager;
+  private final StateManager stateManager;
   private final ConfigurationSource conf;
   private final int heavyNodeCriteria;
   private static final int REQUIRED_RACKS = 2;
@@ -71,7 +71,8 @@
    * @param conf        Configuration
    */
   public PipelinePlacementPolicy(final NodeManager nodeManager,
-      final PipelineStateManager stateManager, final ConfigurationSource conf) {
+                                 final StateManager stateManager,
+                                 final ConfigurationSource conf) {
     super(nodeManager, conf);
     this.nodeManager = nodeManager;
     this.conf = conf;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
index 8df976c..e459fcc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
@@ -37,10 +37,10 @@
 public abstract class PipelineProvider {
 
   private final NodeManager nodeManager;
-  private final PipelineStateManager stateManager;
+  private final StateManager stateManager;
 
   public PipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager) {
+      StateManager stateManager) {
     this.nodeManager = nodeManager;
     this.stateManager = stateManager;
   }
@@ -54,7 +54,7 @@
     return nodeManager;
   }
 
-  public PipelineStateManager getPipelineStateManager() {
+  public StateManager getPipelineStateManager() {
     return stateManager;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index ac6a4ad..8fc7f3e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.safemode.SafeModeManager;
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
@@ -36,6 +37,7 @@
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,14 +54,18 @@
   private final PipelineManager pipelineManager;
   private final ConfigurationSource conf;
   private final SafeModeManager scmSafeModeManager;
+  private final SCMContext scmContext;
   private final boolean pipelineAvailabilityCheck;
   private final SCMPipelineMetrics metrics;
 
   public PipelineReportHandler(SafeModeManager scmSafeModeManager,
-      PipelineManager pipelineManager, ConfigurationSource conf) {
+                               PipelineManager pipelineManager,
+                               SCMContext scmContext,
+                               ConfigurationSource conf) {
     Preconditions.checkNotNull(pipelineManager);
     this.scmSafeModeManager = scmSafeModeManager;
     this.pipelineManager = pipelineManager;
+    this.scmContext = scmContext;
     this.conf = conf;
     this.metrics = SCMPipelineMetrics.create();
     this.pipelineAvailabilityCheck = conf.getBoolean(
@@ -96,11 +102,10 @@
     try {
       pipeline = pipelineManager.getPipeline(pipelineID);
     } catch (PipelineNotFoundException e) {
-      final ClosePipelineCommand closeCommand =
-          new ClosePipelineCommand(pipelineID);
-      final CommandForDatanode datanodeCommand =
-          new CommandForDatanode<>(dn.getUuid(), closeCommand);
-      publisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand);
+      SCMCommand<?> command = new ClosePipelineCommand(pipelineID);
+      command.setTerm(scmContext.getTermOfLeader());
+      publisher.fireEvent(SCMEvents.DATANODE_COMMAND,
+          new CommandForDatanode<>(dn.getUuid(), command));
       return;
     }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 8bc5bd5..581477e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -19,10 +19,12 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -38,7 +40,7 @@
  * state. All the read and write operations in PipelineStateMap are protected
  * by a read write lock.
  */
-public class PipelineStateManager {
+public class PipelineStateManager implements StateManager {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(PipelineStateManager.class);
@@ -48,72 +50,90 @@
   public PipelineStateManager() {
     this.pipelineStateMap = new PipelineStateMap();
   }
-
+  @Override
   public void addPipeline(Pipeline pipeline) throws IOException {
     pipelineStateMap.addPipeline(pipeline);
     LOG.info("Created pipeline {}", pipeline);
   }
 
-  void addContainerToPipeline(PipelineID pipelineId, ContainerID containerID)
+  @Override
+  public void addContainerToPipeline(PipelineID pipelineId,
+                                     ContainerID containerID)
       throws IOException {
     pipelineStateMap.addContainerToPipeline(pipelineId, containerID);
   }
 
+  @Override
   public Pipeline getPipeline(PipelineID pipelineID)
       throws PipelineNotFoundException {
     return pipelineStateMap.getPipeline(pipelineID);
   }
 
+  @Override
   public List<Pipeline> getPipelines() {
     return pipelineStateMap.getPipelines();
   }
 
-  List<Pipeline> getPipelines(ReplicationType type) {
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type) {
     return pipelineStateMap.getPipelines(type);
   }
 
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor) {
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type,
+                                     ReplicationFactor factor) {
     return pipelineStateMap.getPipelines(type, factor);
   }
 
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type,
+                                     ReplicationFactor factor,
       PipelineState state) {
     return pipelineStateMap.getPipelines(type, factor, state);
   }
 
-  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor,
+  @Override
+  public List<Pipeline> getPipelines(
+      ReplicationType type, ReplicationFactor factor,
       PipelineState state, Collection<DatanodeDetails> excludeDns,
       Collection<PipelineID> excludePipelines) {
     return pipelineStateMap
         .getPipelines(type, factor, state, excludeDns, excludePipelines);
   }
 
-  List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
+  @Override
+  public List<Pipeline> getPipelines(ReplicationType type,
+                                     PipelineState... states) {
     return pipelineStateMap.getPipelines(type, states);
   }
 
-  NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
+  @Override
+  public NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
       throws IOException {
     return pipelineStateMap.getContainers(pipelineID);
   }
 
-  int getNumberOfContainers(PipelineID pipelineID) throws IOException {
+  @Override
+  public int getNumberOfContainers(PipelineID pipelineID) throws IOException {
     return pipelineStateMap.getNumberOfContainers(pipelineID);
   }
 
-  Pipeline removePipeline(PipelineID pipelineID) throws IOException {
+  @Override
+  public Pipeline removePipeline(PipelineID pipelineID) throws IOException {
     Pipeline pipeline = pipelineStateMap.removePipeline(pipelineID);
     LOG.info("Pipeline {} removed from db", pipeline);
     return pipeline;
   }
 
-  void removeContainerFromPipeline(PipelineID pipelineID,
+  @Override
+  public void removeContainerFromPipeline(PipelineID pipelineID,
       ContainerID containerID) throws IOException {
     pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID);
   }
 
-  Pipeline finalizePipeline(PipelineID pipelineId)
-      throws PipelineNotFoundException {
+  @Override
+  public Pipeline finalizePipeline(PipelineID pipelineId)
+      throws IOException {
     Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId);
     if (!pipeline.isClosed()) {
       pipeline = pipelineStateMap
@@ -123,7 +143,8 @@
     return pipeline;
   }
 
-  Pipeline openPipeline(PipelineID pipelineId) throws IOException {
+  @Override
+  public Pipeline openPipeline(PipelineID pipelineId) throws IOException {
     Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId);
     if (pipeline.isClosed()) {
       throw new IOException("Closed pipeline can not be opened");
@@ -142,6 +163,7 @@
    * @param pipelineID ID of the pipeline to activate.
    * @throws IOException in case of any Exception
    */
+  @Override
   public void activatePipeline(PipelineID pipelineID)
       throws IOException {
     pipelineStateMap
@@ -154,14 +176,45 @@
    * @param pipelineID ID of the pipeline to deactivate.
    * @throws IOException in case of any Exception
    */
+  @Override
   public void deactivatePipeline(PipelineID pipelineID)
       throws IOException {
     pipelineStateMap
         .updatePipelineState(pipelineID, PipelineState.DORMANT);
   }
 
+  @Override
+  public void reinitialize(Table<PipelineID, Pipeline> pipelineStore)
+      throws IOException {
+  }
+
+  @Override
   public void updatePipelineState(PipelineID id, PipelineState newState)
       throws PipelineNotFoundException {
     pipelineStateMap.updatePipelineState(id, newState);
   }
+
+  @Override
+  public void addPipeline(HddsProtos.Pipeline pipelineProto)
+      throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void removePipeline(HddsProtos.PipelineID pipelineIDProto)
+      throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void updatePipelineState(
+      HddsProtos.PipelineID pipelineIDProto, HddsProtos.PipelineState newState)
+      throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void close() {
+    // Do nothing
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java
new file mode 100644
index 0000000..c84a852
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
+import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+import java.util.Collection;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Implementation of pipeline state manager.
+ * PipelineStateMap class holds the data structures related to pipeline and its
+ * state. All the read and write operations in PipelineStateMap are protected
+ * by a read write lock.
+ */
+public class PipelineStateManagerV2Impl implements StateManager {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PipelineStateManager.class);
+
+  private PipelineStateMap pipelineStateMap;
+  private final NodeManager nodeManager;
+  private Table<PipelineID, Pipeline> pipelineStore;
+  private final DBTransactionBuffer transactionBuffer;
+
+  // Protect potential contentions between RaftServer and PipelineManager.
+  // See https://issues.apache.org/jira/browse/HDDS-4560
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  public PipelineStateManagerV2Impl(
+      Table<PipelineID, Pipeline> pipelineStore, NodeManager nodeManager,
+      DBTransactionBuffer buffer) throws IOException {
+    this.pipelineStateMap = new PipelineStateMap();
+    this.nodeManager = nodeManager;
+    this.pipelineStore = pipelineStore;
+    this.transactionBuffer = buffer;
+    initialize();
+  }
+
+  private void initialize() throws IOException {
+    if (pipelineStore == null || nodeManager == null) {
+      throw new IOException("PipelineStore cannot be null");
+    }
+    if (pipelineStore.isEmpty()) {
+      LOG.info("No pipeline exists in current db");
+      return;
+    }
+    TableIterator<PipelineID, ? extends Table.KeyValue<PipelineID, Pipeline>>
+        iterator = pipelineStore.iterator();
+    while (iterator.hasNext()) {
+      Pipeline pipeline = iterator.next().getValue();
+      pipelineStateMap.addPipeline(pipeline);
+      nodeManager.addPipeline(pipeline);
+    }
+  }
+
+  @Override
+  public void addPipeline(HddsProtos.Pipeline pipelineProto)
+      throws IOException {
+    lock.writeLock().lock();
+    try {
+      Pipeline pipeline = Pipeline.getFromProtobuf(pipelineProto);
+      if (pipelineStore != null) {
+        transactionBuffer
+            .addToBuffer(pipelineStore, pipeline.getId(), pipeline);
+        pipelineStateMap.addPipeline(pipeline);
+        nodeManager.addPipeline(pipeline);
+        LOG.info("Created pipeline {}.", pipeline);
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void addContainerToPipeline(
+      PipelineID pipelineId, ContainerID containerID)
+      throws IOException {
+    lock.writeLock().lock();
+    try {
+      pipelineStateMap.addContainerToPipeline(pipelineId, containerID);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public Pipeline getPipeline(PipelineID pipelineID)
+      throws PipelineNotFoundException {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getPipeline(pipelineID);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines() {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getPipelines();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(HddsProtos.ReplicationType type) {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getPipelines(type);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(
+      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor) {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getPipelines(type, factor);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(
+      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
+                              Pipeline.PipelineState state) {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getPipelines(type, factor, state);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(
+      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
+      Pipeline.PipelineState state, Collection<DatanodeDetails> excludeDns,
+      Collection<PipelineID> excludePipelines) {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap
+          .getPipelines(type, factor, state, excludeDns, excludePipelines);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(HddsProtos.ReplicationType type,
+                                     Pipeline.PipelineState... states) {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getPipelines(type, states);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
+      throws IOException {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getContainers(pipelineID);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public int getNumberOfContainers(PipelineID pipelineID) throws IOException {
+    lock.readLock().lock();
+    try {
+      return pipelineStateMap.getNumberOfContainers(pipelineID);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void removePipeline(HddsProtos.PipelineID pipelineIDProto)
+      throws IOException {
+    lock.writeLock().lock();
+    try {
+      PipelineID pipelineID = PipelineID.getFromProtobuf(pipelineIDProto);
+      if (pipelineStore != null) {
+        transactionBuffer.removeFromBuffer(pipelineStore, pipelineID);
+      }
+      Pipeline pipeline = pipelineStateMap.removePipeline(pipelineID);
+      nodeManager.removePipeline(pipeline);
+      LOG.info("Pipeline {} removed.", pipeline);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+
+  @Override
+  public void removeContainerFromPipeline(
+      PipelineID pipelineID, ContainerID containerID) throws IOException {
+    lock.writeLock().lock();
+    try {
+      // Typica;;y, SCM can send a pipeline close Action to datanode and receive
+      // pipelineCloseAction to close the pipeline which will remove the
+      // pipelineId both from the piplineStateMap as well as
+      // pipeline2containerMap Subsequently, close container handler event can
+      // also try to close the container as a part of which , it will also
+      // try to remove the container from the pipeline2container Map which will
+      // fail with PipelineNotFoundException. These are executed over ratis, and
+      // if the exception is propagated to SCMStateMachine., it will bring down
+      // the SCM. Ignoring it here.
+      pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID);
+    } catch (PipelineNotFoundException pnfe) {
+      LOG.info("Pipeline {} is not found in the pipeline2ContainerMap. Pipeline"
+          + " may have been closed already.", pipelineID);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void updatePipelineState(
+      HddsProtos.PipelineID pipelineIDProto, HddsProtos.PipelineState newState)
+      throws IOException {
+    PipelineID pipelineID = PipelineID.getFromProtobuf(pipelineIDProto);
+    Pipeline.PipelineState oldState =
+        getPipeline(pipelineID).getPipelineState();
+    lock.writeLock().lock();
+    try {
+      // null check is here to prevent the case where SCM store
+      // is closed but the staleNode handlers/pipeline creations
+      // still try to access it.
+      if (pipelineStore != null) {
+        pipelineStateMap.updatePipelineState(pipelineID,
+            Pipeline.PipelineState.fromProtobuf(newState));
+        transactionBuffer
+            .addToBuffer(pipelineStore, pipelineID, getPipeline(pipelineID));
+      }
+    } catch (IOException ex) {
+      LOG.warn("Pipeline {} state update failed", pipelineID);
+      // revert back to old state in memory
+      pipelineStateMap.updatePipelineState(pipelineID, oldState);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void close() throws Exception {
+    lock.writeLock().lock();
+    try {
+      if (pipelineStore != null) {
+        pipelineStore.close();
+        pipelineStore = null;
+      }
+    } catch (Exception ex) {
+      LOG.error("Pipeline  store close failed", ex);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  // TODO Remove legacy
+  @Override
+  public void addPipeline(Pipeline pipeline) throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public Pipeline removePipeline(PipelineID pipelineID) throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void updatePipelineState(PipelineID id,
+                                  Pipeline.PipelineState newState)
+      throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public Pipeline finalizePipeline(PipelineID pipelineId)
+      throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+
+  @Override
+  public Pipeline openPipeline(PipelineID pipelineId) throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void activatePipeline(PipelineID pipelineID) throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void deactivatePipeline(PipelineID pipelineID) throws IOException {
+    throw new IOException("Not supported.");
+  }
+
+  @Override
+  public void reinitialize(Table<PipelineID, Pipeline> store)
+      throws IOException {
+    lock.writeLock().lock();
+    try {
+      pipelineStore.close();
+      this.pipelineStateMap = new PipelineStateMap();
+      this.pipelineStore = store;
+      initialize();
+    } catch (Exception ex) {
+      LOG.error("PipelineManager reinitialization close failed", ex);
+      throw new IOException(ex);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  // legacy interfaces end
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for PipelineStateManager.
+   */
+  public static class Builder {
+    private Table<PipelineID, Pipeline> pipelineStore;
+    private NodeManager nodeManager;
+    private SCMRatisServer scmRatisServer;
+    private DBTransactionBuffer transactionBuffer;
+
+    public Builder setSCMDBTransactionBuffer(DBTransactionBuffer buffer) {
+      this.transactionBuffer = buffer;
+      return this;
+    }
+
+    public Builder setRatisServer(final SCMRatisServer ratisServer) {
+      scmRatisServer = ratisServer;
+      return this;
+    }
+
+    public Builder setNodeManager(final NodeManager scmNodeManager) {
+      nodeManager = scmNodeManager;
+      return this;
+    }
+
+    public Builder setPipelineStore(
+        final Table<PipelineID, Pipeline> pipelineTable) {
+      this.pipelineStore = pipelineTable;
+      return this;
+    }
+
+    public StateManager build() throws IOException {
+      Preconditions.checkNotNull(pipelineStore);
+
+      final StateManager pipelineStateManager =
+          new PipelineStateManagerV2Impl(
+              pipelineStore, nodeManager, transactionBuffer);
+
+      final SCMHAInvocationHandler invocationHandler =
+          new SCMHAInvocationHandler(SCMRatisProtocol.RequestType.PIPELINE,
+              pipelineStateManager, scmRatisServer);
+
+      return (StateManager) Proxy.newProxyInstance(
+          SCMHAInvocationHandler.class.getClassLoader(),
+          new Class<?>[]{StateManager.class}, invocationHandler);
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 45aafb0..64101a5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
@@ -38,6 +39,7 @@
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
 
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,14 +58,18 @@
   private int pipelineNumberLimit;
   private int maxPipelinePerDatanode;
   private final LeaderChoosePolicy leaderChoosePolicy;
+  private final SCMContext scmContext;
 
   @VisibleForTesting
   public RatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, ConfigurationSource conf,
-      EventPublisher eventPublisher) {
+                               StateManager stateManager,
+                               ConfigurationSource conf,
+                               EventPublisher eventPublisher,
+                               SCMContext scmContext) {
     super(nodeManager, stateManager);
     this.conf = conf;
     this.eventPublisher = eventPublisher;
+    this.scmContext = scmContext;
     this.placementPolicy =
         new PipelinePlacementPolicy(nodeManager, stateManager, conf);
     this.pipelineNumberLimit = conf.getInt(
@@ -157,6 +163,8 @@
         new CreatePipelineCommand(pipeline.getId(), pipeline.getType(),
             factor, dns);
 
+    createCommand.setTerm(scmContext.getTermOfLeader());
+
     dns.forEach(node -> {
       LOG.info("Sending CreatePipelineCommand for pipeline:{} to datanode:{}",
           pipeline.getId(), node.getUuidString());
@@ -187,15 +195,16 @@
    * Removes pipeline from SCM. Sends command to destroy pipeline on all
    * the datanodes.
    *
-   * @param pipeline        - Pipeline to be destroyed
-   * @throws IOException
+   * @param pipeline            - Pipeline to be destroyed
+   * @throws NotLeaderException - Send datanode command while not leader
    */
   @Override
-  public void close(Pipeline pipeline) {
+  public void close(Pipeline pipeline) throws NotLeaderException {
     final ClosePipelineCommand closeCommand =
         new ClosePipelineCommand(pipeline.getId());
-    pipeline.getNodes().stream().forEach(node -> {
-      final CommandForDatanode datanodeCommand =
+    closeCommand.setTerm(scmContext.getTermOfLeader());
+    pipeline.getNodes().forEach(node -> {
+      final CommandForDatanode<?> datanodeCommand =
           new CommandForDatanode<>(node.getUuid(), closeCommand);
       LOG.info("Send pipeline:{} close command to datanode {}",
           pipeline.getId(), datanodeCommand.getDatanodeId());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 97bff9a..7940f6d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -117,4 +117,23 @@
                 p.sameDatanodes(pipeline)))
         .collect(Collectors.toList());
   }
+
+  /**
+   * Return the list of pipelines who share the same set of datanodes
+   * with the input pipeline.
+   *
+   * @param stateManager PipelineStateManager
+   * @param pipeline input pipeline
+   * @return list of matched pipeline
+   */
+  static List<Pipeline> checkPipelineContainSameDatanodes(
+      StateManager stateManager, Pipeline pipeline) {
+    return stateManager.getPipelines(
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE)
+        .stream().filter(p -> !p.getId().equals(pipeline.getId()) &&
+            (p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
+                p.sameDatanodes(pipeline)))
+        .collect(Collectors.toList());
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index b188b42..b22feab 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -32,7 +32,6 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -43,8 +42,10 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
+import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.utils.Scheduler;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -64,14 +65,15 @@
  * for pipelines must come via PipelineManager. It synchronises all write
  * and read operations via a ReadWriteLock.
  */
-public class SCMPipelineManager implements PipelineManager {
+public class SCMPipelineManager implements
+    PipelineManager, EventHandler<SafeModeStatus> {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMPipelineManager.class);
 
   private final ReadWriteLock lock;
   private PipelineFactory pipelineFactory;
-  private PipelineStateManager stateManager;
+  private StateManager stateManager;
   private final BackgroundPipelineCreator backgroundPipelineCreator;
   private Scheduler scheduler;
 
@@ -98,7 +100,7 @@
     this(conf, nodeManager, pipelineStore, eventPublisher, null, null);
     this.stateManager = new PipelineStateManager();
     this.pipelineFactory = new PipelineFactory(nodeManager,
-        stateManager, conf, eventPublisher);
+        stateManager, conf, eventPublisher, SCMContext.emptyContext());
     this.pipelineStore = pipelineStore;
     initializePipelineState();
   }
@@ -136,7 +138,7 @@
     this.pipelineCreationAllowed = new AtomicBoolean(!this.isInSafeMode.get());
   }
 
-  public PipelineStateManager getStateManager() {
+  public StateManager getStateManager() {
     return stateManager;
   }
 
@@ -409,7 +411,7 @@
   }
 
   private void updatePipelineStateInDb(PipelineID pipelineId,
-                                       Pipeline.PipelineState state)
+                                       Pipeline.PipelineState oldState)
           throws IOException {
     // null check is here to prevent the case where SCM store
     // is closed but the staleNode handlers/pipleine creations
@@ -418,9 +420,9 @@
       try {
         pipelineStore.put(pipelineId, getPipeline(pipelineId));
       } catch (IOException ex) {
-        LOG.info("Pipeline {} state update failed", pipelineId);
+        LOG.warn("Pipeline {} state update failed", pipelineId);
         // revert back to old state in memory
-        stateManager.updatePipelineState(pipelineId, state);
+        stateManager.updatePipelineState(pipelineId, oldState);
       }
     }
   }
@@ -468,33 +470,53 @@
   }
 
   /**
-   * Finalizes pipeline in the SCM. Removes pipeline and makes rpc call to
-   * destroy pipeline on the datanodes immediately or after timeout based on the
-   * value of onTimeout parameter.
-   *
-   * @param pipeline        - Pipeline to be destroyed
-   * @param onTimeout       - if true pipeline is removed and destroyed on
-   *                        datanodes after timeout
+   * Fire events to close all containers related to the input pipeline.
+   * @param pipelineId - ID of the pipeline.
    * @throws IOException
    */
-  @Override
-  public void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout)
+  protected void closeContainersForPipeline(final PipelineID pipelineId)
       throws IOException {
-    LOG.info("Destroying pipeline:{}", pipeline);
-    finalizePipeline(pipeline.getId());
-    if (onTimeout) {
-      long pipelineDestroyTimeoutInMillis =
-          conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT,
-              ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT,
-              TimeUnit.MILLISECONDS);
-      scheduler.schedule(() -> destroyPipeline(pipeline),
-          pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS, LOG,
-          String.format("Destroy pipeline failed for pipeline:%s", pipeline));
-    } else {
-      destroyPipeline(pipeline);
+    Set<ContainerID> containerIDs = stateManager.getContainers(pipelineId);
+    for (ContainerID containerID : containerIDs) {
+      eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
     }
   }
 
+  /**
+   * put pipeline in CLOSED state.
+   * @param pipeline - ID of the pipeline.
+   * @param onTimeout - whether to remove pipeline after some time.
+   * @throws IOException
+   */
+  @Override
+  public void closePipeline(Pipeline pipeline, boolean onTimeout)
+      throws IOException {
+    PipelineID pipelineID = pipeline.getId();
+    lock.writeLock().lock();
+    try {
+      if (!pipeline.isClosed()) {
+        stateManager.updatePipelineState(pipelineID,
+            Pipeline.PipelineState.CLOSED);
+        LOG.info("Pipeline {} moved to CLOSED state", pipeline);
+      }
+      metrics.removePipelineMetrics(pipelineID);
+    } finally {
+      lock.writeLock().unlock();
+    }
+    // close containers.
+    closeContainersForPipeline(pipelineID);
+    if (!onTimeout) {
+      // close pipeline right away.
+      removePipeline(pipeline);
+    }
+  }
+
+  /**
+   * Scrub pipelines.
+   * @param type Pipeline type
+   * @param factor Pipeline factor
+   * @throws IOException
+   */
   @Override
   public void scrubPipeline(ReplicationType type, ReplicationFactor factor)
       throws IOException{
@@ -507,18 +529,29 @@
         ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT,
         ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT,
         TimeUnit.MILLISECONDS);
-    List<Pipeline> needToSrubPipelines = stateManager.getPipelines(type, factor,
-        Pipeline.PipelineState.ALLOCATED).stream()
-        .filter(p -> currentTime.toEpochMilli() - p.getCreationTimestamp()
-            .toEpochMilli() >= pipelineScrubTimeoutInMills)
-        .collect(Collectors.toList());
-    for (Pipeline p : needToSrubPipelines) {
-      LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
-          " since it stays at ALLOCATED stage for " +
-          Duration.between(currentTime, p.getCreationTimestamp()).toMinutes() +
-          " mins.");
-      finalizeAndDestroyPipeline(p, false);
+
+    List<Pipeline> candidates = stateManager.getPipelines(type, factor);
+
+    for (Pipeline p : candidates) {
+      // scrub pipelines who stay ALLOCATED for too long.
+      if (p.getPipelineState() == Pipeline.PipelineState.ALLOCATED &&
+          (currentTime.toEpochMilli() - p.getCreationTimestamp()
+              .toEpochMilli() >= pipelineScrubTimeoutInMills)) {
+        LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
+            " since it stays at ALLOCATED stage for " +
+            Duration.between(currentTime, p.getCreationTimestamp())
+                .toMinutes() + " mins.");
+        closePipeline(p, false);
+      }
+      // scrub pipelines who stay CLOSED for too long.
+      if (p.getPipelineState() == Pipeline.PipelineState.CLOSED) {
+        LOG.info("Scrubbing pipeline: id: " + p.getId().toString() +
+            " since it is at CLOSED stage.");
+        closeContainersForPipeline(p.getId());
+        removePipeline(p);
+      }
     }
+    return;
   }
 
   @Override
@@ -630,56 +663,20 @@
   }
 
   /**
-   * Moves the pipeline to CLOSED state and sends close container command for
-   * all the containers in the pipeline.
-   *
-   * @param pipelineId - ID of the pipeline to be moved to CLOSED state.
-   * @throws IOException
-   */
-  private void finalizePipeline(PipelineID pipelineId) throws IOException {
-    lock.writeLock().lock();
-    try {
-      Pipeline.PipelineState state = stateManager.
-              getPipeline(pipelineId).getPipelineState();
-      stateManager.finalizePipeline(pipelineId);
-      updatePipelineStateInDb(pipelineId, state);
-      Set<ContainerID> containerIDs = stateManager.getContainers(pipelineId);
-      for (ContainerID containerID : containerIDs) {
-        eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
-      }
-      metrics.removePipelineMetrics(pipelineId);
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all
-   * the datanodes for ratis pipelines.
-   *
-   * @param pipeline        - Pipeline to be destroyed
-   * @throws IOException
-   */
-  protected void destroyPipeline(Pipeline pipeline) throws IOException {
-    pipelineFactory.close(pipeline.getType(), pipeline);
-    // remove the pipeline from the pipeline manager
-    removePipeline(pipeline.getId());
-    triggerPipelineCreation();
-  }
-
-  /**
    * Removes the pipeline from the db and pipeline state map.
    *
-   * @param pipelineId - ID of the pipeline to be removed
+   * @param pipeline - pipeline to be removed
    * @throws IOException
    */
-  protected void removePipeline(PipelineID pipelineId) throws IOException {
+  protected void removePipeline(Pipeline pipeline) throws IOException {
+    pipelineFactory.close(pipeline.getType(), pipeline);
+    PipelineID pipelineID = pipeline.getId();
     lock.writeLock().lock();
     try {
       if (pipelineStore != null) {
-        pipelineStore.delete(pipelineId);
-        Pipeline pipeline = stateManager.removePipeline(pipelineId);
-        nodeManager.removePipeline(pipeline);
+        pipelineStore.delete(pipelineID);
+        Pipeline pipelineRemoved = stateManager.removePipeline(pipelineID);
+        nodeManager.removePipeline(pipelineRemoved);
         metrics.incNumPipelineDestroyed();
       }
     } catch (IOException ex) {
@@ -762,6 +759,12 @@
     return this.isInSafeMode.get();
   }
 
+  @Override
+  public void reinitialize(Table<PipelineID, Pipeline> store)
+      throws IOException {
+    throw new RuntimeException("Not supported operation.");
+  }
+
   public Table<PipelineID, Pipeline> getPipelineStore() {
     return pipelineStore;
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
index f1e6c1b..9973a0a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
@@ -34,7 +34,7 @@
 public class SimplePipelineProvider extends PipelineProvider {
 
   public SimplePipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager) {
+      StateManager stateManager) {
     super(nodeManager, stateManager);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java
new file mode 100644
index 0000000..e8a3902
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import org.apache.hadoop.hdds.utils.db.Table;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.NavigableSet;
+
+/**
+ * Manages the state of pipelines in SCM.
+ * TODO Rename to PipelineStateManager once the old state manager is removed.
+ */
+public interface StateManager {
+
+  /**
+   * Adding pipeline would be replicated to Ratis.
+   * @param pipelineProto
+   * @throws IOException
+   */
+  @Replicate
+  void addPipeline(HddsProtos.Pipeline pipelineProto) throws IOException;
+
+  /**
+   * Removing pipeline would be replicated to Ratis.
+   * @param pipelineIDProto
+   * @return Pipeline removed
+   * @throws IOException
+   */
+  @Replicate
+  void removePipeline(HddsProtos.PipelineID pipelineIDProto)
+      throws IOException;
+
+  /**
+   * Updating pipeline state would be replicated to Ratis.
+   * @param pipelineIDProto
+   * @param newState
+   * @throws IOException
+   */
+  @Replicate
+  void updatePipelineState(HddsProtos.PipelineID pipelineIDProto,
+                           HddsProtos.PipelineState newState)
+      throws IOException;
+
+  void addContainerToPipeline(PipelineID pipelineID,
+                              ContainerID containerID) throws IOException;
+
+  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException;
+
+  List<Pipeline> getPipelines();
+
+  List<Pipeline> getPipelines(HddsProtos.ReplicationType type);
+
+  List<Pipeline> getPipelines(HddsProtos.ReplicationType type,
+                              HddsProtos.ReplicationFactor factor);
+
+  List<Pipeline> getPipelines(HddsProtos.ReplicationType type,
+                              HddsProtos.ReplicationFactor factor,
+                              Pipeline.PipelineState state);
+
+  List<Pipeline> getPipelines(HddsProtos.ReplicationType type,
+                              HddsProtos.ReplicationFactor factor,
+                              Pipeline.PipelineState state,
+                              Collection<DatanodeDetails> excludeDns,
+                              Collection<PipelineID> excludePipelines);
+
+  List<Pipeline> getPipelines(HddsProtos.ReplicationType type,
+                              Pipeline.PipelineState... states);
+
+  NavigableSet<ContainerID> getContainers(PipelineID pipelineID)
+      throws IOException;
+
+  int getNumberOfContainers(PipelineID pipelineID) throws IOException;
+
+
+  void removeContainerFromPipeline(PipelineID pipelineID,
+                                   ContainerID containerID) throws IOException;
+
+  void close() throws Exception;
+
+  // TODO remove legacy interfaces once we switch to Ratis based.
+
+  void addPipeline(Pipeline pipeline) throws IOException;
+
+  Pipeline removePipeline(PipelineID pipelineID) throws IOException;
+
+  void updatePipelineState(PipelineID id, Pipeline.PipelineState newState)
+      throws IOException;
+
+  Pipeline finalizePipeline(PipelineID pipelineId)
+      throws IOException;
+
+  Pipeline openPipeline(PipelineID pipelineId) throws IOException;
+
+  void activatePipeline(PipelineID pipelineID)
+      throws IOException;
+
+  void deactivatePipeline(PipelineID pipelineID)
+      throws IOException;
+
+  void reinitialize(Table<PipelineID, Pipeline> pipelineStore)
+      throws IOException;
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java
index 415cf10..0b49ed8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java
@@ -19,7 +19,7 @@
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager;
+import org.apache.hadoop.hdds.scm.pipeline.StateManager;
 
 import java.util.List;
 
@@ -31,7 +31,7 @@
 public class DefaultLeaderChoosePolicy extends LeaderChoosePolicy {
 
   public DefaultLeaderChoosePolicy(
-      NodeManager nodeManager, PipelineStateManager pipelineStateManager) {
+      NodeManager nodeManager, StateManager pipelineStateManager) {
     super(nodeManager, pipelineStateManager);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java
index 04c155b..ada7702 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java
@@ -19,7 +19,7 @@
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager;
+import org.apache.hadoop.hdds.scm.pipeline.StateManager;
 
 import java.util.List;
 
@@ -29,10 +29,10 @@
 public abstract class LeaderChoosePolicy {
 
   private final NodeManager nodeManager;
-  private final PipelineStateManager pipelineStateManager;
+  private final StateManager pipelineStateManager;
 
   public LeaderChoosePolicy(
-      NodeManager nodeManager, PipelineStateManager pipelineStateManager) {
+      NodeManager nodeManager, StateManager pipelineStateManager) {
     this.nodeManager = nodeManager;
     this.pipelineStateManager = pipelineStateManager;
   }
@@ -49,7 +49,7 @@
     return nodeManager;
   }
 
-  protected PipelineStateManager getPipelineStateManager() {
+  protected StateManager getPipelineStateManager() {
     return pipelineStateManager;
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java
index 8e1a0ff..03d676e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager;
+import org.apache.hadoop.hdds.scm.pipeline.StateManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -45,7 +45,7 @@
 
   public static LeaderChoosePolicy getPolicy(
       ConfigurationSource conf, final NodeManager nodeManager,
-      final PipelineStateManager pipelineStateManager) throws SCMException {
+      final StateManager pipelineStateManager) throws SCMException {
     final Class<? extends LeaderChoosePolicy> policyClass = conf
         .getClass(ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY,
             OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY_DEFAULT,
@@ -53,7 +53,7 @@
     Constructor<? extends LeaderChoosePolicy> constructor;
     try {
       constructor = policyClass.getDeclaredConstructor(NodeManager.class,
-          PipelineStateManager.class);
+          StateManager.class);
       LOG.info("Create leader choose policy of type {}",
           policyClass.getCanonicalName());
     } catch (NoSuchMethodException e) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java
index d4068b9..8cb1df1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager;
+import org.apache.hadoop.hdds.scm.pipeline.StateManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,7 +41,7 @@
       LoggerFactory.getLogger(MinLeaderCountChoosePolicy.class);
 
   public MinLeaderCountChoosePolicy(
-      NodeManager nodeManager, PipelineStateManager pipelineStateManager) {
+      NodeManager nodeManager, StateManager pipelineStateManager) {
     super(nodeManager, pipelineStateManager);
   }
 
@@ -66,7 +66,7 @@
 
   private Map<DatanodeDetails, Integer> getSuggestedLeaderCount(
       List<DatanodeDetails> dns, NodeManager nodeManager,
-      PipelineStateManager pipelineStateManager) {
+      StateManager pipelineStateManager) {
     Map<DatanodeDetails, Integer> suggestedLeaderCount = new HashMap<>();
     for (DatanodeDetails dn : dns) {
       suggestedLeaderCount.put(dn, 0);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
index 3f405dc..cc0c776 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
@@ -26,12 +26,15 @@
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetSCMCertRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMListCertificateRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMListCertificateResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Status;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 
@@ -53,14 +56,17 @@
       LoggerFactory.getLogger(SCMSecurityProtocolServerSideTranslatorPB.class);
 
   private final SCMSecurityProtocol impl;
+  private final StorageContainerManager scm;
 
   private OzoneProtocolMessageDispatcher<SCMSecurityRequest,
       SCMSecurityResponse, ProtocolMessageEnum>
       dispatcher;
 
   public SCMSecurityProtocolServerSideTranslatorPB(SCMSecurityProtocol impl,
+      StorageContainerManager storageContainerManager,
       ProtocolMessageMetrics messageMetrics) {
     this.impl = impl;
+    this.scm = storageContainerManager;
     this.dispatcher =
         new OzoneProtocolMessageDispatcher<>("ScmSecurityProtocol",
             messageMetrics, LOG);
@@ -69,55 +75,79 @@
   @Override
   public SCMSecurityResponse submitRequest(RpcController controller,
       SCMSecurityRequest request) throws ServiceException {
+    if (!scm.checkLeader()) {
+      throw new ServiceException(scm.getScmHAManager()
+          .getRatisServer()
+          .triggerNotLeaderException());
+    }
     return dispatcher.processRequest(request, this::processRequest,
         request.getCmdType(), request.getTraceID());
   }
 
-  public SCMSecurityResponse processRequest(SCMSecurityRequest request)
-      throws ServiceException {
+  public SCMSecurityResponse processRequest(SCMSecurityRequest request) {
+    SCMSecurityResponse.Builder scmSecurityResponse =
+        SCMSecurityResponse.newBuilder().setCmdType(request.getCmdType())
+        .setStatus(Status.OK);
     try {
       switch (request.getCmdType()) {
       case GetCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getCertificate(request.getGetCertificateRequest()))
-            .build();
+        return scmSecurityResponse.setGetCertResponseProto(
+            getCertificate(request.getGetCertificateRequest())).build();
       case GetCACertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getCACertificate(request.getGetCACertificateRequest()))
-            .build();
+        return scmSecurityResponse.setGetCertResponseProto(
+            getCACertificate(request.getGetCACertificateRequest())).build();
       case GetOMCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getOMCertificate(request.getGetOMCertRequest()))
+        return scmSecurityResponse.setGetCertResponseProto(
+            getOMCertificate(request.getGetOMCertRequest()))
             .build();
       case GetDataNodeCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setGetCertResponseProto(
-                getDataNodeCertificate(request.getGetDataNodeCertRequest()))
+        return scmSecurityResponse.setGetCertResponseProto(
+            getDataNodeCertificate(request.getGetDataNodeCertRequest()))
             .build();
       case ListCertificate:
-        return SCMSecurityResponse.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setStatus(Status.OK)
-            .setListCertificateResponseProto(
-                listCertificate(request.getListCertificateRequest()))
+        return scmSecurityResponse.setListCertificateResponseProto(
+            listCertificate(request.getListCertificateRequest()))
             .build();
+      case GetSCMCertificate:
+        return scmSecurityResponse.setGetCertResponseProto(getSCMCertificate(
+            request.getGetSCMCertificateRequest())).build();
+      case GetRootCACertificate:
+        return scmSecurityResponse.setGetCertResponseProto(
+            getRootCACertificate()).build();
+      case ListCACertificate:
+        return scmSecurityResponse.setListCertificateResponseProto(
+            listCACertificate()).build();
       default:
         throw new IllegalArgumentException(
             "Unknown request type: " + request.getCmdType());
       }
     } catch (IOException e) {
-      throw new ServiceException(e);
+      scmSecurityResponse.setSuccess(false);
+      scmSecurityResponse.setStatus(exceptionToResponseStatus(e));
+      // If actual cause is set in SCMSecurityException, set message with
+      // actual cause message.
+      if (e.getMessage() != null) {
+        scmSecurityResponse.setMessage(e.getMessage());
+      } else {
+        if (e.getCause() != null && e.getCause().getMessage() != null) {
+          scmSecurityResponse.setMessage(e.getCause().getMessage());
+        }
+      }
+      return scmSecurityResponse.build();
+    }
+  }
+
+  /**
+   * Convert exception to corresponsing status.
+   * @param ex
+   * @return SCMSecurityProtocolProtos.Status code of the error.
+   */
+  private Status exceptionToResponseStatus(IOException ex) {
+    if (ex instanceof SCMSecurityException) {
+      return Status.values()[
+          ((SCMSecurityException) ex).getErrorCode().ordinal()];
+    } else {
+      return Status.INTERNAL_ERROR;
     }
   }
 
@@ -147,6 +177,30 @@
   }
 
   /**
+   * Get signed certificate for SCM.
+   *
+   * @param request - SCMGetSCMCertRequestProto
+   * @return SCMGetCertResponseProto.
+   */
+
+  public SCMGetCertResponseProto getSCMCertificate(
+      SCMGetSCMCertRequestProto request)
+      throws IOException {
+
+    String certificate = impl.getSCMCertificate(request.getScmDetails(),
+        request.getCSR());
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509Certificate(certificate)
+            .setX509CACertificate(impl.getCACertificate());
+
+    return builder.build();
+
+  }
+
+  /**
    * Get SCM signed certificate for OzoneManager.
    *
    * @param request
@@ -209,4 +263,32 @@
 
 
   }
+
+
+  public SCMGetCertResponseProto getRootCACertificate() throws IOException {
+    String rootCACertificate = impl.getRootCACertificate();
+    SCMGetCertResponseProto.Builder builder =
+        SCMGetCertResponseProto
+            .newBuilder()
+            .setResponseCode(ResponseCode.success)
+            .setX509RootCACertificate(rootCACertificate);
+    return builder.build();
+  }
+
+  public SCMListCertificateResponseProto listCACertificate()
+      throws IOException {
+
+    List<String> certs = impl.listCACertificate();
+
+    SCMListCertificateResponseProto.Builder builder =
+        SCMListCertificateResponseProto
+            .newBuilder()
+            .setResponseCode(SCMListCertificateResponseProto
+                .ResponseCode.success)
+            .addAllCertificates(certs);
+    return builder.build();
+
+  }
+
+
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
index fbbc68a..3385774 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -36,12 +36,14 @@
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
@@ -63,6 +65,7 @@
     implements ScmBlockLocationProtocolPB {
 
   private final ScmBlockLocationProtocol impl;
+  private final StorageContainerManager scm;
 
   private static final Logger LOG = LoggerFactory
       .getLogger(ScmBlockLocationProtocolServerSideTranslatorPB.class);
@@ -78,9 +81,11 @@
    */
   public ScmBlockLocationProtocolServerSideTranslatorPB(
       ScmBlockLocationProtocol impl,
+      StorageContainerManager scm,
       ProtocolMessageMetrics<ProtocolMessageEnum> metrics)
       throws IOException {
     this.impl = impl;
+    this.scm = scm;
     dispatcher = new OzoneProtocolMessageDispatcher<>(
         "BlockLocationProtocol", metrics, LOG);
 
@@ -97,6 +102,11 @@
   @Override
   public SCMBlockLocationResponse send(RpcController controller,
       SCMBlockLocationRequest request) throws ServiceException {
+    if (!scm.getScmContext().isLeader()) {
+      throw new ServiceException(scm.getScmHAManager()
+                                    .getRatisServer()
+                                    .triggerNotLeaderException());
+    }
     return dispatcher.processRequest(
         request,
         this::processMessage,
@@ -126,6 +136,10 @@
         response.setGetScmInfoResponse(
             getScmInfo(request.getGetScmInfoRequest()));
         break;
+      case AddScm:
+        response.setAddScmResponse(
+            getAddSCMResponse(request.getAddScmRequestProto()));
+        break;
       case SortDatanodes:
         response.setSortDatanodesResponse(sortDatanodes(
             request.getSortDatanodesRequest(), request.getVersion()
@@ -212,6 +226,15 @@
         .build();
   }
 
+  public HddsProtos.AddScmResponseProto getAddSCMResponse(
+      HddsProtos.AddScmRequestProto req)
+      throws IOException {
+    boolean status = impl.addSCM(AddSCMRequest.getFromProtobuf(req));
+    return HddsProtos.AddScmResponseProto.newBuilder()
+        .setSuccess(status)
+        .build();
+  }
+
   public SortDatanodesResponseProto sortDatanodes(
       SortDatanodesRequestProto request, int clientVersion)
       throws ServiceException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 7b1cf77..68046fe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -80,6 +80,7 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.slf4j.Logger;
@@ -92,6 +93,7 @@
 
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto.Error.errorPipelineAlreadyExists;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto.Error.success;
+import static org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol.ADMIN_COMMAND_TYPE;
 
 /**
  * This class is the server-side translator that forwards requests received on
@@ -107,6 +109,7 @@
           StorageContainerLocationProtocolServerSideTranslatorPB.class);
 
   private final StorageContainerLocationProtocol impl;
+  private final StorageContainerManager scm;
 
   private OzoneProtocolMessageDispatcher<ScmContainerLocationRequest,
       ScmContainerLocationResponse, ProtocolMessageEnum>
@@ -121,9 +124,11 @@
    */
   public StorageContainerLocationProtocolServerSideTranslatorPB(
       StorageContainerLocationProtocol impl,
+      StorageContainerManager scm,
       ProtocolMessageMetrics<ProtocolMessageEnum> protocolMetrics)
       throws IOException {
     this.impl = impl;
+    this.scm = scm;
     this.dispatcher =
         new OzoneProtocolMessageDispatcher<>("ScmContainerLocation",
             protocolMetrics, LOG);
@@ -132,6 +137,13 @@
   @Override
   public ScmContainerLocationResponse submitRequest(RpcController controller,
       ScmContainerLocationRequest request) throws ServiceException {
+    // not leader or not belong to admin command.
+    if (!scm.getScmContext().isLeader()
+        && !ADMIN_COMMAND_TYPE.contains(request.getCmdType())) {
+      throw new ServiceException(scm.getScmHAManager()
+                                    .getRatisServer()
+                                    .triggerNotLeaderException());
+    }
     return dispatcher
         .processRequest(request, this::processRequest, request.getCmdType(),
             request.getTraceID());
@@ -492,8 +504,8 @@
     return HddsProtos.GetScmInfoResponseProto.newBuilder()
         .setClusterId(scmInfo.getClusterId())
         .setScmId(scmInfo.getScmId())
+        .addAllPeerRoles(scmInfo.getRatisPeerRoles())
         .build();
-
   }
 
   public InSafeModeResponseProto inSafeMode(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
index 39cf341..206e40b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.lang3.tuple.Pair;
@@ -30,6 +29,9 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -83,7 +85,6 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMSafeModeManager.class);
   private final boolean isSafeModeEnabled;
-  private final long waitTime;
   private AtomicBoolean inSafeMode = new AtomicBoolean(true);
   private AtomicBoolean preCheckComplete = new AtomicBoolean(false);
 
@@ -102,25 +103,24 @@
 
   private final EventQueue eventPublisher;
   private final PipelineManager pipelineManager;
+  private final SCMServiceManager serviceManager;
+  private final SCMContext scmContext;
 
   private final SafeModeMetrics safeModeMetrics;
 
   public SCMSafeModeManager(ConfigurationSource conf,
       List<ContainerInfo> allContainers, PipelineManager pipelineManager,
-      EventQueue eventQueue) {
+      EventQueue eventQueue, SCMServiceManager serviceManager,
+      SCMContext scmContext) {
     this.config = conf;
     this.pipelineManager = pipelineManager;
     this.eventPublisher = eventQueue;
+    this.serviceManager = serviceManager;
+    this.scmContext = scmContext;
     this.isSafeModeEnabled = conf.getBoolean(
         HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
         HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT);
 
-
-    this.waitTime = conf.getTimeDuration(
-        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
-        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
     if (isSafeModeEnabled) {
       this.safeModeMetrics = SafeModeMetrics.create();
       ContainerSafeModeRule containerSafeModeRule =
@@ -147,13 +147,6 @@
         exitRules.put(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE,
             oneReplicaPipelineSafeModeRule);
       }
-      boolean createPipelineInSafemode = conf.getBoolean(
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION,
-          HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION_DEFAULT);
-
-      if (createPipelineInSafemode) {
-        pipelineManager.startPipelineCreator();
-      }
     } else {
       this.safeModeMetrics = null;
       exitSafeMode(eventQueue);
@@ -177,28 +170,22 @@
   public void emitSafeModeStatus() {
     SafeModeStatus safeModeStatus =
         new SafeModeStatus(getInSafeMode(), getPreCheckComplete());
+    // TODO: remove eventPublisher,
+    //  since there will no consumer of SAFE_MODE_STATUS in future.
     eventPublisher.fireEvent(SCMEvents.SAFE_MODE_STATUS,
         safeModeStatus);
 
-    // Only notify the delayed listeners if safemode remains on, as precheck
-    // may have completed.
-    if (safeModeStatus.isInSafeMode()) {
-      eventPublisher.fireEvent(SCMEvents.DELAYED_SAFE_MODE_STATUS,
-          safeModeStatus);
-    } else {
-      // If safemode is off, then notify the delayed listeners with a delay.
-      final Thread safeModeExitThread = new Thread(() -> {
-        try {
-          Thread.sleep(waitTime);
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-        eventPublisher.fireEvent(SCMEvents.DELAYED_SAFE_MODE_STATUS,
-            safeModeStatus);
-      });
+    // update SCMContext
+    scmContext.updateSafeModeStatus(safeModeStatus);
 
-      safeModeExitThread.setDaemon(true);
-      safeModeExitThread.start();
+    // notify SCMServiceManager
+    if (!safeModeStatus.isInSafeMode()) {
+      // If safemode is off, then notify the delayed listeners with a delay.
+      serviceManager.notifyStatusChanged();
+    } else if (safeModeStatus.isPreCheckComplete()) {
+      // Only notify the delayed listeners if safemode remains on, as precheck
+      // may have completed.
+      serviceManager.notifyEventTriggered(Event.PRE_CHECK_COMPLETED);
     }
   }
 
@@ -331,8 +318,8 @@
    */
   public static class SafeModeStatus {
 
-    private boolean safeModeStatus;
-    private boolean preCheckPassed;
+    private final boolean safeModeStatus;
+    private final boolean preCheckPassed;
 
     public SafeModeStatus(boolean safeModeState, boolean preCheckPassed) {
       this.safeModeStatus = safeModeState;
@@ -346,6 +333,14 @@
     public boolean isPreCheckComplete() {
       return preCheckPassed;
     }
+
+    @Override
+    public String toString() {
+      return "SafeModeStatus{" +
+          "safeModeStatus=" + safeModeStatus +
+          ", preCheckPassed=" + preCheckPassed +
+          '}';
+    }
   }
 
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
index adf14f7..b0e1ae1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
@@ -22,8 +22,9 @@
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 
@@ -45,10 +46,11 @@
 
   PipelineManager getPipelineManager();
 
-  ContainerManager getContainerManager();
+  ContainerManagerV2 getContainerManager();
 
   ReplicationManager getReplicationManager();
 
   InetSocketAddress getDatanodeRpcAddress();
 
+  SCMNodeDetails getScmNodeDetails();
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index f91dac7..3a1181e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
@@ -62,7 +62,6 @@
 import com.google.common.collect.Maps;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.ProtocolMessageEnum;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.IO_EXCEPTION;
@@ -115,11 +114,11 @@
     BlockingService blockProtoPbService =
         ScmBlockLocationProtocolProtos.ScmBlockLocationProtocolService
             .newReflectiveBlockingService(
-                new ScmBlockLocationProtocolServerSideTranslatorPB(this,
+                new ScmBlockLocationProtocolServerSideTranslatorPB(this, scm,
                     protocolMessageMetrics));
 
-    final InetSocketAddress scmBlockAddress = HddsServerUtil
-        .getScmBlockClientBindAddress(conf);
+    final InetSocketAddress scmBlockAddress =
+        scm.getScmNodeDetails().getBlockProtocolServerAddress();
     blockRpcServer =
         startRpcServer(
             conf,
@@ -129,8 +128,8 @@
             handlerCount);
     blockRpcAddress =
         updateRPCListenAddress(
-            conf, OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress,
-            blockRpcServer);
+            conf, scm.getScmNodeDetails().getBlockProtocolServerAddressKey(),
+            scmBlockAddress, blockRpcServer);
     if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
         false)) {
       blockRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance());
@@ -294,6 +293,33 @@
   }
 
   @Override
+  public boolean addSCM(AddSCMRequest request) throws IOException {
+    LOG.debug("Adding SCM {} addr {} cluster id {}",
+        request.getScmId(), request.getRatisAddr(), request.getClusterId());
+
+    Map<String, String> auditMap = Maps.newHashMap();
+    auditMap.put("scmId", String.valueOf(request.getScmId()));
+    auditMap.put("cluster", String.valueOf(request.getClusterId()));
+    auditMap.put("addr", String.valueOf(request.getRatisAddr()));
+    boolean auditSuccess = true;
+    try{
+      return scm.getScmHAManager().addSCM(request);
+    } catch (Exception ex) {
+      auditSuccess = false;
+      AUDIT.logReadFailure(
+          buildAuditMessageForFailure(SCMAction.ADD_SCM, auditMap, ex)
+      );
+      throw ex;
+    } finally {
+      if(auditSuccess) {
+        AUDIT.logReadSuccess(
+            buildAuditMessageForSuccess(SCMAction.ADD_SCM, auditMap)
+        );
+      }
+    }
+  }
+
+  @Override
   public List<DatanodeDetails> sortDatanodes(List<String> nodes,
       String clientMachine) throws IOException {
     boolean auditSuccess = true;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
index 13d1307..ee4bc20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hdds.scm.server;
 
 import java.io.IOException;
+import java.lang.reflect.Proxy;
 import java.math.BigInteger;
 import java.security.cert.CRLException;
 import java.security.cert.X509CRL;
@@ -33,7 +34,11 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol;
+import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CRLApprover;
@@ -49,18 +54,20 @@
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.ozone.OzoneConsts.CRL_SEQUENCE_ID_KEY;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.SCM;
+import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore.CertType.VALID_CERTS;
 
 /**
  * A Certificate Store class that persists certificates issued by SCM CA.
  */
-public class SCMCertStore implements CertificateStore {
+public final class SCMCertStore implements CertificateStore {
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMCertStore.class);
-  private final SCMMetadataStore scmMetadataStore;
+  private SCMMetadataStore scmMetadataStore;
   private final Lock lock;
   private AtomicLong crlSequenceId;
 
-  public SCMCertStore(SCMMetadataStore dbStore, long sequenceId) {
+  private SCMCertStore(SCMMetadataStore dbStore, long sequenceId) {
     this.scmMetadataStore = dbStore;
     lock = new ReentrantLock();
     crlSequenceId = new AtomicLong(sequenceId);
@@ -68,16 +75,54 @@
 
   @Override
   public void storeValidCertificate(BigInteger serialID,
-                                    X509Certificate certificate)
+      X509Certificate certificate, NodeType role)
       throws IOException {
     lock.lock();
     try {
       // This makes sure that no certificate IDs are reusable.
-      if ((getCertificateByID(serialID, CertType.VALID_CERTS) == null) &&
-          (getCertificateByID(serialID, CertType.REVOKED_CERTS) == null)) {
-        scmMetadataStore.getValidCertsTable().put(serialID, certificate);
+      if (role == SCM) {
+        // If the role is SCM, store certificate in scm cert table
+        // and valid cert table. This is to help to return scm certs during
+        // getCertificate call.
+        storeValidScmCertificate(serialID, certificate);
       } else {
-        throw new SCMSecurityException("Conflicting certificate ID");
+        // As we don't have different table for other roles, other role
+        // certificates will go to validCertsTable.
+        scmMetadataStore.getValidCertsTable().put(serialID, certificate);
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Writes a new SCM certificate that was issued to the persistent store.
+   * @param serialID - Certificate Serial Number.
+   * @param certificate - Certificate to persist.
+   * @throws IOException - on Failure.
+   */
+  private void storeValidScmCertificate(BigInteger serialID,
+      X509Certificate certificate) throws IOException {
+    lock.lock();
+    try {
+      BatchOperation batchOperation =
+          scmMetadataStore.getBatchHandler().initBatchOperation();
+      scmMetadataStore.getValidSCMCertsTable().putWithBatch(batchOperation,
+          serialID, certificate);
+      scmMetadataStore.getValidCertsTable().putWithBatch(batchOperation,
+          serialID, certificate);
+      scmMetadataStore.getStore().commitBatchOperation(batchOperation);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  public void checkValidCertID(BigInteger serialID) throws IOException {
+    lock.lock();
+    try {
+      if ((getCertificateByID(serialID, VALID_CERTS) != null) ||
+          (getCertificateByID(serialID, CertType.REVOKED_CERTS) != null)) {
+        throw new SCMSecurityException("Conflicting certificate ID" + serialID);
       }
     } finally {
       lock.unlock();
@@ -166,7 +211,7 @@
   public X509Certificate getCertificateByID(BigInteger serialID,
                                             CertType certType)
       throws IOException {
-    if (certType == CertType.VALID_CERTS) {
+    if (certType == VALID_CERTS) {
       return scmMetadataStore.getValidCertsTable().get(serialID);
     } else {
       return scmMetadataStore.getRevokedCertsTable().get(serialID);
@@ -174,32 +219,24 @@
   }
 
   @Override
-  public List<X509Certificate> listCertificate(HddsProtos.NodeType role,
+  public List<X509Certificate> listCertificate(NodeType role,
       BigInteger startSerialID, int count, CertType certType)
       throws IOException {
-    // TODO: Filter by role
-    List<? extends Table.KeyValue<BigInteger, X509Certificate>> certs;
+
+    Preconditions.checkNotNull(startSerialID);
+
     if (startSerialID.longValue() == 0) {
       startSerialID = null;
     }
-    if (certType == CertType.VALID_CERTS) {
-      certs = scmMetadataStore.getValidCertsTable().getRangeKVs(
-          startSerialID, count);
-    } else {
-      certs = scmMetadataStore.getRevokedCertsTable().getRangeKVs(
-          startSerialID, count);
-    }
+
+    List<? extends Table.KeyValue<BigInteger, X509Certificate>> certs =
+        getCertTableList(role, certType, startSerialID, count);
+
     List<X509Certificate> results = new ArrayList<>(certs.size());
+
     for (Table.KeyValue<BigInteger, X509Certificate> kv : certs) {
       try {
         X509Certificate cert = kv.getValue();
-        // TODO: filter certificate based on CN and specified role.
-        // This requires change of the approved subject CN format:
-        // Subject: O=CID-e66d4728-32bb-4282-9770-351a7e913f07,
-        // OU=9a7c4f86-c862-4067-b12c-e7bca51d3dfe, CN=root@98dba189d5f0
-
-        // The new format will look like below that are easier to filter.
-        // CN=FQDN/user=root/role=datanode/...
         results.add(cert);
       } catch (IOException e) {
         LOG.error("Fail to list certificate from SCM metadata store", e);
@@ -209,4 +246,78 @@
     }
     return results;
   }
+
+  private List<? extends Table.KeyValue<BigInteger, X509Certificate>>
+      getCertTableList(NodeType role, CertType certType,
+      BigInteger startSerialID, int count)
+      throws IOException {
+    // Implemented for role SCM and CertType VALID_CERTS.
+    // TODO: Implement for role OM/Datanode and for SCM for CertType
+    //  REVOKED_CERTS.
+
+    if (role == SCM) {
+      if (certType == VALID_CERTS) {
+        return scmMetadataStore.getValidSCMCertsTable().getRangeKVs(
+            startSerialID, count);
+      } else {
+        return scmMetadataStore.getRevokedCertsTable().getRangeKVs(
+            startSerialID, count);
+      }
+    } else {
+      if (certType == VALID_CERTS) {
+        return scmMetadataStore.getValidCertsTable().getRangeKVs(
+            startSerialID, count);
+      } else {
+        return scmMetadataStore.getRevokedCertsTable().getRangeKVs(
+            startSerialID, count);
+      }
+    }
+  }
+
+  /**
+   * Reinitialise the underlying store with SCMMetaStore
+   * during SCM StateMachine reload.
+   * @param metadataStore
+   */
+  @Override
+  public void reinitialize(SCMMetadataStore metadataStore) {
+    this.scmMetadataStore = metadataStore;
+  }
+
+  public static class Builder {
+
+    private SCMMetadataStore metadataStore;
+    private long crlSequenceId;
+    private SCMRatisServer scmRatisServer;
+
+
+    public Builder setMetadaStore(SCMMetadataStore scmMetadataStore) {
+      this.metadataStore = scmMetadataStore;
+      return this;
+    }
+
+    public Builder setCRLSequenceId(long sequenceId) {
+      this.crlSequenceId = sequenceId;
+      return this;
+    }
+
+    public Builder setRatisServer(final SCMRatisServer ratisServer) {
+      scmRatisServer = ratisServer;
+      return this;
+    }
+
+    public CertificateStore build() {
+      final SCMCertStore scmCertStore = new SCMCertStore(metadataStore,
+          crlSequenceId);
+
+      final SCMHAInvocationHandler scmhaInvocationHandler =
+          new SCMHAInvocationHandler(SCMRatisProtocol.RequestType.CERT_STORE,
+              scmCertStore, scmRatisServer);
+
+      return (CertificateStore) Proxy.newProxyInstance(
+          SCMHAInvocationHandler.class.getClassLoader(),
+          new Class<?>[]{CertificateStore.class}, scmhaInvocationHandler);
+
+    }
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index dccb75e..4dda296 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -30,35 +30,28 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
-import org.apache.hadoop.hdds.scm.node.NodeStatus;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -77,16 +70,16 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collections;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
+import java.util.Collections;
+import java.util.ArrayList;
 import java.util.TreeSet;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService.newReflectiveBlockingService;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
@@ -97,8 +90,7 @@
  * The RPC server that listens to requests from clients.
  */
 public class SCMClientProtocolServer implements
-    StorageContainerLocationProtocol, Auditor,
-    EventHandler<SafeModeStatus> {
+    StorageContainerLocationProtocol, Auditor {
   private static final Logger LOG =
       LoggerFactory.getLogger(SCMClientProtocolServer.class);
   private static final AuditLogger AUDIT =
@@ -107,14 +99,12 @@
   private final InetSocketAddress clientRpcAddress;
   private final StorageContainerManager scm;
   private final OzoneConfiguration conf;
-  private SafeModePrecheck safeModePrecheck;
   private final ProtocolMessageMetrics<ProtocolMessageEnum> protocolMetrics;
 
   public SCMClientProtocolServer(OzoneConfiguration conf,
       StorageContainerManager scm) throws IOException {
     this.scm = scm;
     this.conf = conf;
-    safeModePrecheck = new SafeModePrecheck(conf);
     final int handlerCount =
         conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
             OZONE_SCM_HANDLER_COUNT_DEFAULT);
@@ -130,10 +120,11 @@
     BlockingService storageProtoPbService =
         newReflectiveBlockingService(
             new StorageContainerLocationProtocolServerSideTranslatorPB(this,
+                scm,
                 protocolMetrics));
 
-    final InetSocketAddress scmAddress = HddsServerUtil
-        .getScmClientBindAddress(conf);
+    final InetSocketAddress scmAddress =
+        scm.getScmNodeDetails().getClientProtocolServerAddress();
     clientRpcServer =
         startRpcServer(
             conf,
@@ -142,7 +133,8 @@
             storageProtoPbService,
             handlerCount);
     clientRpcAddress =
-        updateRPCListenAddress(conf, OZONE_SCM_CLIENT_ADDRESS_KEY,
+        updateRPCListenAddress(conf,
+            scm.getScmNodeDetails().getClientProtocolServerAddressKey(),
             scmAddress, clientRpcServer);
     if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
         false)) {
@@ -191,7 +183,10 @@
   public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
       replicationType, HddsProtos.ReplicationFactor factor,
       String owner) throws IOException {
-    ScmUtils.preCheck(ScmOps.allocateContainer, safeModePrecheck);
+    if (scm.getScmContext().isInSafeMode()) {
+      throw new SCMException("SafeModePrecheck failed for allocateContainer",
+          ResultCodes.SAFE_MODE_EXCEPTION);
+    }
     getScm().checkAdminAccess(getRpcRemoteUsername());
 
     final ContainerInfo container = scm.getContainerManager()
@@ -210,7 +205,7 @@
     getScm().checkAdminAccess(remoteUser);
     try {
       return scm.getContainerManager()
-          .getContainer(ContainerID.valueof(containerID));
+          .getContainer(ContainerID.valueOf(containerID));
     } catch (IOException ex) {
       auditSuccess = false;
       AUDIT.logReadFailure(
@@ -229,11 +224,11 @@
 
   private ContainerWithPipeline getContainerWithPipelineCommon(
       long containerID) throws IOException {
-    final ContainerID cid = ContainerID.valueof(containerID);
+    final ContainerID cid = ContainerID.valueOf(containerID);
     final ContainerInfo container = scm.getContainerManager()
         .getContainer(cid);
 
-    if (safeModePrecheck.isInSafeMode()) {
+    if (scm.getScmContext().isInSafeMode()) {
       if (container.isOpen()) {
         if (!hasRequiredReplicas(container)) {
           throw new SCMException("Open container " + containerID + " doesn't"
@@ -275,13 +270,13 @@
       AUDIT.logReadSuccess(buildAuditMessageForSuccess(
           SCMAction.GET_CONTAINER_WITH_PIPELINE,
           Collections.singletonMap("containerID",
-          ContainerID.valueof(containerID).toString())));
+          ContainerID.valueOf(containerID).toString())));
       return cp;
     } catch (IOException ex) {
       AUDIT.logReadFailure(buildAuditMessageForFailure(
           SCMAction.GET_CONTAINER_WITH_PIPELINE,
           Collections.singletonMap("containerID",
-              ContainerID.valueof(containerID).toString()), ex));
+              ContainerID.valueOf(containerID).toString()), ex));
       throw ex;
     }
   }
@@ -298,13 +293,13 @@
       try {
         ContainerWithPipeline cp = getContainerWithPipelineCommon(containerID);
         cpList.add(cp);
-        strContainerIDs.append(ContainerID.valueof(containerID).toString());
+        strContainerIDs.append(ContainerID.valueOf(containerID).toString());
         strContainerIDs.append(",");
       } catch (IOException ex) {
         AUDIT.logReadFailure(buildAuditMessageForFailure(
             SCMAction.GET_CONTAINER_WITH_PIPELINE_BATCH,
             Collections.singletonMap("containerID",
-                ContainerID.valueof(containerID).toString()), ex));
+                ContainerID.valueOf(containerID).toString()), ex));
         throw ex;
       }
     }
@@ -368,13 +363,13 @@
       auditMap.put("state", state.name());
     }
     try {
-      // To allow startcontainerId to take the value "0",
-      // "null" is assigned, so that its handled in the
-      // scm.getContainerManager().listContainer method
-      final ContainerID containerId = startContainerID != 0 ? ContainerID
-          .valueof(startContainerID) : null;
-      return scm.getContainerManager().
-          listContainer(containerId, count, state);
+      final ContainerID containerId = ContainerID.valueOf(startContainerID);
+      if(null == state) {
+        return scm.getContainerManager().getContainers(containerId, count);
+      }
+      return scm.getContainerManager().getContainers(state).stream()
+          .filter(info -> info.containerID().getId() >= startContainerID)
+          .sorted().limit(count).collect(Collectors.toList());
     } catch (Exception ex) {
       auditSuccess = false;
       AUDIT.logReadFailure(
@@ -398,7 +393,7 @@
     try {
       getScm().checkAdminAccess(remoteUser);
       scm.getContainerManager().deleteContainer(
-          ContainerID.valueof(containerID));
+          ContainerID.valueOf(containerID));
     } catch (Exception ex) {
       auditSuccess = false;
       AUDIT.logWriteFailure(
@@ -489,7 +484,7 @@
     auditMap.put("remoteUser", remoteUser);
     try {
       scm.checkAdminAccess(remoteUser);
-      final ContainerID cid = ContainerID.valueof(containerID);
+      final ContainerID cid = ContainerID.valueOf(containerID);
       final HddsProtos.LifeCycleState state = scm.getContainerManager()
           .getContainer(cid).getState();
       if (!state.equals(HddsProtos.LifeCycleState.OPEN)) {
@@ -497,7 +492,7 @@
             ResultCodes.UNEXPECTED_CONTAINER_STATE);
       }
       scm.getEventQueue().fireEvent(SCMEvents.CLOSE_CONTAINER,
-          ContainerID.valueof(containerID));
+          ContainerID.valueOf(containerID));
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
           SCMAction.CLOSE_CONTAINER, auditMap));
     } catch (Exception ex) {
@@ -561,7 +556,7 @@
     PipelineManager pipelineManager = scm.getPipelineManager();
     Pipeline pipeline =
         pipelineManager.getPipeline(PipelineID.getFromProtobuf(pipelineID));
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, true);
+    pipelineManager.closePipeline(pipeline, true);
     AUDIT.logWriteSuccess(
         buildAuditMessageForSuccess(SCMAction.CLOSE_PIPELINE, null)
     );
@@ -575,6 +570,17 @@
           new ScmInfo.Builder()
               .setClusterId(scm.getScmStorageConfig().getClusterID())
               .setScmId(scm.getScmStorageConfig().getScmId());
+      if (scm.getScmHAManager().getRatisServer() != null) {
+        builder.setRatisPeerRoles(
+            scm.getScmHAManager().getRatisServer().getRatisRoles());
+      } else {
+        // In case, there is no ratis, there is no ratis role.
+        // This will just print the hostname with ratis port as the default
+        // behaviour.
+        String adddress = scm.getSCMHANodeDetails().getLocalNodeDetails()
+            .getRatisHostPortStr();
+        builder.setRatisPeerRoles(Arrays.asList(adddress));
+      }
       return builder.build();
     } catch (Exception ex) {
       auditSuccess = false;
@@ -793,7 +799,7 @@
    * Set safe mode status based on .
    */
   public boolean getSafeModeStatus() {
-    return safeModePrecheck.isInSafeMode();
+    return scm.getScmContext().isInSafeMode();
   }
 
 
@@ -846,10 +852,4 @@
   public void close() throws IOException {
     stop();
   }
-
-  @Override
-  public void onMessage(SafeModeStatus status,
-      EventPublisher publisher) {
-    safeModePrecheck.setInSafeMode(status.isInSafeMode());
-  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
index 9bbabd1..7cdd5c5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
@@ -21,9 +21,11 @@
 
 
 import org.apache.hadoop.hdds.scm.block.BlockManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -51,7 +53,9 @@
  * ReplicationManager replicationManager;
  * SCMSafeModeManager scmSafeModeManager;
  * CertificateServer certificateServer;
- * SCMMetadata scmMetadataStore.
+ * SCMMetadata scmMetadataStore;
+ * SCMHAManager scmHAManager;
+ * SCMContext scmContext.
  *
  * If any of these are *not* specified then the default version of these
  * managers are used by SCM.
@@ -60,13 +64,15 @@
 public final class SCMConfigurator {
   private NodeManager scmNodeManager;
   private PipelineManager pipelineManager;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private BlockManager scmBlockManager;
   private ReplicationManager replicationManager;
   private SCMSafeModeManager scmSafeModeManager;
   private CertificateServer certificateServer;
   private SCMMetadataStore metadataStore;
   private NetworkTopology networkTopology;
+  private SCMHAManager scmHAManager;
+  private SCMContext scmContext;
 
   /**
    * Allows user to specify a version of Node manager to use with this SCM.
@@ -90,7 +96,7 @@
    *  this SCM.
    * @param containerManager - Container Manager.
    */
-  public void setContainerManager(ContainerManager containerManager) {
+  public void setContainerManager(ContainerManagerV2 containerManager) {
     this.containerManager = containerManager;
   }
 
@@ -149,6 +155,24 @@
   }
 
   /**
+   * Allows user to specify a custom version of SCMHAManager to be
+   * used with this SCM.
+   * @param scmHaMgr - SCMHAManager.
+   */
+  public void setSCMHAManager(SCMHAManager scmHaMgr) {
+    this.scmHAManager = scmHaMgr;
+  }
+
+  /**
+   * Allows user to specify a custom version of SCMContext to be
+   * used with this SCM.
+   * @param scmContext - SCMContext.
+   */
+  public void setScmContext(SCMContext scmContext) {
+    this.scmContext = scmContext;
+  }
+
+  /**
    * Gets SCM Node Manager.
    * @return Node Manager.
    */
@@ -168,7 +192,7 @@
    * Get Container Manager.
    * @return container Manger.
    */
-  public ContainerManager getContainerManager() {
+  public ContainerManagerV2 getContainerManager() {
     return containerManager;
   }
 
@@ -219,4 +243,20 @@
   public NetworkTopology getNetworkTopology() {
     return networkTopology;
   }
+
+  /**
+   * Get SCMHAManager.
+   * @return SCMHAManager.
+   */
+  public SCMHAManager getSCMHAManager() {
+    return scmHAManager;
+  }
+
+  /**
+   * Get SCMContext.
+   * @return SCMContext.
+   */
+  public SCMContext getScmContext() {
+    return scmContext;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index c2dafcb..85f43ed 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -44,10 +44,10 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -87,7 +87,6 @@
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.setNodeOperationalStateCommand;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
@@ -137,7 +136,8 @@
     heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher(
         scm.getScmNodeManager(), eventPublisher);
 
-    InetSocketAddress datanodeRpcAddr = getDataNodeBindAddress(conf);
+    InetSocketAddress datanodeRpcAddr = getDataNodeBindAddress(
+        conf, scm.getScmNodeDetails());
 
     protocolMessageMetrics = getProtocolMessageMetrics();
 
@@ -294,6 +294,11 @@
       throws IOException {
     SCMCommandProto.Builder builder =
         SCMCommandProto.newBuilder();
+
+    // In HA mode, it is the term of current leader SCM.
+    // In non-HA mode, it is the default value 0.
+    builder.setTerm(cmd.getTerm());
+
     switch (cmd.getType()) {
     case reregisterCommand:
       return builder
@@ -425,7 +430,7 @@
    * @return
    */
   protected String getDatanodeAddressKey() {
-    return OZONE_SCM_DATANODE_ADDRESS_KEY;
+    return this.scm.getScmNodeDetails().getDatanodeAddressKey();
   }
 
   /**
@@ -433,8 +438,9 @@
    * @param conf ozone configuration
    * @return InetSocketAddress
    */
-  protected InetSocketAddress getDataNodeBindAddress(OzoneConfiguration conf) {
-    return HddsServerUtil.getScmDataNodeBindAddress(conf);
+  protected InetSocketAddress getDataNodeBindAddress(
+      OzoneConfiguration conf, SCMNodeDetails scmNodeDetails) {
+    return scmNodeDetails.getDatanodeProtocolServerAddress();
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index 8b8eff4..5a1ecf3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -31,9 +31,10 @@
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
 import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB;
@@ -53,6 +54,10 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.CERTIFICATE_NOT_FOUND;
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_CA_CERT_FAILED;
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_CERTIFICATE_FAILED;
+import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_ROOT_CA_CERT_FAILED;
 import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType.KERBEROS_TRUSTED;
 
 /**
@@ -69,9 +74,12 @@
   private final RPC.Server rpcServer;
   private final InetSocketAddress rpcAddress;
   private final ProtocolMessageMetrics metrics;
+  private final StorageContainerManager storageContainerManager;
 
   SCMSecurityProtocolServer(OzoneConfiguration conf,
-      CertificateServer certificateServer) throws IOException {
+      CertificateServer certificateServer, StorageContainerManager scm)
+      throws IOException {
+    this.storageContainerManager = scm;
     this.certificateServer = certificateServer;
     final int handlerCount =
         conf.getInt(ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_KEY,
@@ -87,7 +95,8 @@
     BlockingService secureProtoPbService =
         SCMSecurityProtocolProtos.SCMSecurityProtocolService
             .newReflectiveBlockingService(
-                new SCMSecurityProtocolServerSideTranslatorPB(this, metrics));
+                new SCMSecurityProtocolServerSideTranslatorPB(this,
+                    scm, metrics));
     this.rpcServer =
         StorageContainerManager.startRpcServer(
             conf,
@@ -115,18 +124,7 @@
     LOGGER.info("Processing CSR for dn {}, UUID: {}", dnDetails.getHostName(),
         dnDetails.getUuid());
     Objects.requireNonNull(dnDetails);
-    Future<X509CertificateHolder> future =
-        certificateServer.requestCertificate(certSignReq,
-            KERBEROS_TRUSTED);
-
-    try {
-      return CertificateCodec.getPEMEncodedString(future.get());
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      throw new IOException("getDataNodeCertificate operation failed. ", e);
-    } catch (ExecutionException e) {
-      throw new IOException("getDataNodeCertificate operation failed. ", e);
-    }
+    return getEncodedCertToString(certSignReq, NodeType.DATANODE);
   }
 
   /**
@@ -142,20 +140,80 @@
     LOGGER.info("Processing CSR for om {}, UUID: {}", omDetails.getHostName(),
         omDetails.getUuid());
     Objects.requireNonNull(omDetails);
+    return getEncodedCertToString(certSignReq, NodeType.OM);
+  }
+
+
+  /**
+   * Get signed certificate for SCM Node.
+   *
+   * @param scmNodeDetails   - SCM Node Details.
+   * @param certSignReq - Certificate signing request.
+   * @return String         - SCM signed pem encoded certificate.
+   */
+  @Override
+  public String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails,
+      String certSignReq) throws IOException {
+    Objects.requireNonNull(scmNodeDetails);
+    LOGGER.info("Processing CSR for scm {}, nodeId: {}",
+        scmNodeDetails.getHostName(), scmNodeDetails.getScmNodeId());
+
+    // Check clusterID
+    if (storageContainerManager.getClusterId().equals(
+        scmNodeDetails.getClusterId())) {
+      throw new IOException("SCM ClusterId mismatch. Peer SCM ClusterId " +
+          scmNodeDetails.getClusterId() + ", primary SCM ClusterId "
+          + storageContainerManager.getClusterId());
+    }
+
+    return getEncodedCertToString(certSignReq, NodeType.SCM);
+
+  }
+
+  /**
+   *  Request certificate for the specified role.
+   * @param certSignReq - Certificate signing request.
+   * @param nodeType - role OM/SCM/DATANODE
+   * @return String         - SCM signed pem encoded certificate.
+   * @throws IOException
+   */
+  private String getEncodedCertToString(String certSignReq, NodeType nodeType)
+      throws IOException {
     Future<X509CertificateHolder> future =
         certificateServer.requestCertificate(certSignReq,
-            KERBEROS_TRUSTED);
-
+            KERBEROS_TRUSTED, nodeType);
     try {
       return CertificateCodec.getPEMEncodedString(future.get());
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      throw new IOException("getOMCertificate operation failed. ", e);
+      throw generateException(e, nodeType);
     } catch (ExecutionException e) {
-      throw new IOException("getOMCertificate operation failed. ", e);
+      if (e.getCause() != null) {
+        if (e.getCause() instanceof SCMSecurityException) {
+          throw (SCMSecurityException) e.getCause();
+        } else {
+          throw generateException(e, nodeType);
+        }
+      } else {
+        throw generateException(e, nodeType);
+      }
     }
   }
 
+  private SCMSecurityException generateException(Exception ex, NodeType role) {
+    SCMSecurityException.ErrorCode errorCode;
+    if (role == NodeType.SCM) {
+      errorCode = SCMSecurityException.ErrorCode.GET_SCM_CERTIFICATE_FAILED;
+    } else if (role == NodeType.OM) {
+      errorCode = SCMSecurityException.ErrorCode.GET_OM_CERTIFICATE_FAILED;
+    } else {
+      errorCode = SCMSecurityException.ErrorCode.GET_DN_CERTIFICATE_FAILED;
+    }
+    return new SCMSecurityException("generate " + role.toString() +
+        " Certificate operation failed", ex, errorCode);
+
+  }
+
   /**
    * Get SCM signed certificate with given serial id.
    *
@@ -173,10 +231,12 @@
         return CertificateCodec.getPEMEncodedString(certificate);
       }
     } catch (CertificateException e) {
-      throw new IOException("getCertificate operation failed. ", e);
+      throw new SCMSecurityException("getCertificate operation failed. ", e,
+          GET_CERTIFICATE_FAILED);
     }
     LOGGER.debug("Certificate with serial id {} not found.", certSerialId);
-    throw new IOException("Certificate not found");
+    throw new SCMSecurityException("Certificate not found",
+        CERTIFICATE_NOT_FOUND);
   }
 
   /**
@@ -191,7 +251,8 @@
       return CertificateCodec.getPEMEncodedString(
           certificateServer.getCACertificate());
     } catch (CertificateException e) {
-      throw new IOException("getRootCertificate operation failed. ", e);
+      throw new SCMSecurityException("getRootCertificate operation failed. ",
+          e, GET_CA_CERT_FAILED);
     }
   }
 
@@ -205,7 +266,7 @@
    * @throws IOException
    */
   @Override
-  public List<String> listCertificate(HddsProtos.NodeType role,
+  public List<String> listCertificate(NodeType role,
       long startSerialId, int count, boolean isRevoked) throws IOException {
     List<X509Certificate> certificates =
         certificateServer.listCertificate(role, startSerialId, count,
@@ -216,12 +277,35 @@
         String certStr = CertificateCodec.getPEMEncodedString(cert);
         results.add(certStr);
       } catch (SCMSecurityException e) {
-        throw new IOException("listCertificate operation failed. ", e);
+        throw new SCMSecurityException("listCertificate operation failed.",
+            e, e.getErrorCode());
       }
     }
     return results;
   }
 
+  @Override
+  public List<String> listCACertificate() throws IOException {
+    List<String> caCerts =
+        listCertificate(NodeType.SCM, 0, 10, false);
+    caCerts.add(getRootCACertificate());
+    return caCerts;
+  }
+
+  @Override
+  public String getRootCACertificate() throws IOException {
+    LOGGER.debug("Getting Root CA certificate.");
+    //TODO: This code will be modified after HDDS-4897 is merged and
+    // integrated. For now getting RootCA cert from certificateServer.
+    try {
+      return CertificateCodec.getPEMEncodedString(
+          certificateServer.getCACertificate());
+    } catch (CertificateException e) {
+      throw new SCMSecurityException("getRootCertificate operation failed. ",
+          e, GET_ROOT_CA_CERT_FAILED);
+    }
+  }
+
   public RPC.Server getRpcServer() {
     return rpcServer;
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
index 7d84fc0..1ae29b1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
@@ -33,5 +33,7 @@
   void start(OzoneConfiguration conf) throws Exception;
   boolean init(OzoneConfiguration conf, String clusterId)
       throws IOException;
+  boolean bootStrap(OzoneConfiguration conf)
+      throws IOException;
   String generateClusterId();
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
index a628279..5d9f4de 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
@@ -27,9 +27,11 @@
 import java.util.Properties;
 import java.util.UUID;
 
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_CERT_SERIAL_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR;
 
+
 /**
  * SCMStorageConfig is responsible for management of the
  * StorageDirectories used by the SCM.
@@ -76,4 +78,20 @@
     return scmProperties;
   }
 
+  /**
+   * Sets the SCM Sub-CA certificate serial id.
+   * @param certSerialId
+   * @throws IOException
+   */
+  public void setScmCertSerialId(String certSerialId) throws IOException {
+    getStorageInfo().setProperty(SCM_CERT_SERIAL_ID, certSerialId);
+  }
+
+  /**
+   * Retrives the SCM Sub-CA certificate serial id from the version file.
+   * @return scm sub-CA certificate serial id
+   */
+  public String getScmCertSerialId() {
+    return getStorageInfo().getProperty(SCM_CERT_SERIAL_ID);
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index bca98ed..22d41fd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -25,12 +25,20 @@
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.protobuf.BlockingService;
+
 import java.security.cert.CertificateException;
 import java.security.cert.X509Certificate;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 
@@ -44,23 +52,36 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl;
+import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImplV2;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
@@ -84,7 +105,7 @@
 import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
@@ -95,7 +116,6 @@
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.HddsVersionInfo;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.io.IOUtils;
@@ -116,15 +136,12 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.protobuf.BlockingService;
-
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdds.utils.HAUtils.checkSecurityAndSCMHAEnabled;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
 import static org.apache.hadoop.ozone.OzoneConsts.CRL_SEQUENCE_ID_KEY;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ROOT_CA_PREFIX;
 
 /**
  * StorageContainerManager is the main entry point for the service that
@@ -163,14 +180,19 @@
    */
   private NodeManager scmNodeManager;
   private PipelineManager pipelineManager;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private BlockManager scmBlockManager;
   private final SCMStorageConfig scmStorageConfig;
   private NodeDecommissionManager scmDecommissionManager;
 
   private SCMMetadataStore scmMetadataStore;
+  private SCMHAManager scmHAManager;
+  private SCMContext scmContext;
+  private SequenceIdGenerator sequenceIdGen;
 
   private final EventQueue eventQueue;
+  private final SCMServiceManager serviceManager;
+
   /*
    * HTTP endpoint for JMX access.
    */
@@ -208,6 +230,8 @@
   private NetworkTopology clusterMap;
   private PipelineChoosePolicy pipelineChoosePolicy;
 
+  private final SCMHANodeDetails scmHANodeDetails;
+
   /**
    * Creates a new StorageContainerManager. Configuration will be
    * updated with information on the actual listening addresses used
@@ -215,7 +239,7 @@
    *
    * @param conf configuration
    */
-  public StorageContainerManager(OzoneConfiguration conf)
+  private StorageContainerManager(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
     // default empty configurator means default managers will be used.
     this(conf, new SCMConfigurator());
@@ -230,14 +254,18 @@
    * @param conf - Configuration
    * @param configurator - configurator
    */
-  public StorageContainerManager(OzoneConfiguration conf,
-                                 SCMConfigurator configurator)
+  private StorageContainerManager(OzoneConfiguration conf,
+                                  SCMConfigurator configurator)
       throws IOException, AuthenticationException  {
     super(HddsVersionInfo.HDDS_VERSION_INFO);
 
     Objects.requireNonNull(configurator, "configurator cannot not be null");
     Objects.requireNonNull(conf, "configuration cannot not be null");
 
+    checkSecurityAndSCMHAEnabled(conf);
+
+    scmHANodeDetails = SCMHANodeDetails.loadSCMHAConfig(conf);
+
     configuration = conf;
     initMetrics();
     containerReportCache = buildContainerReportCache();
@@ -247,9 +275,14 @@
      */
     scmStorageConfig = new SCMStorageConfig(conf);
     if (scmStorageConfig.getState() != StorageState.INITIALIZED) {
-      LOG.error("Please make sure you have run \'ozone scm --init\' " +
+      String errMsg = "Please make sure you have run \'ozone scm --init\' " +
           "command to generate all the required metadata to " +
-          scmStorageConfig.getStorageDir() + ".");
+          scmStorageConfig.getStorageDir();
+      if (SCMHAUtils.isSCMHAEnabled(conf)) {
+        errMsg += " or make sure you have run \'ozone scm --bootstrap\' cmd to "
+            + "add the SCM to existing SCM HA group";
+      }
+      LOG.error(errMsg + ".");
       throw new SCMException("SCM not initialized due to storage config " +
           "failure.", ResultCodes.SCM_NOT_INITIALIZED);
     }
@@ -268,6 +301,16 @@
     // A valid pointer to the store is required by all the other services below.
     initalizeMetadataStore(conf, configurator);
 
+    eventQueue = new EventQueue();
+    serviceManager = new SCMServiceManager();
+
+    long watcherTimeout =
+        conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
+            HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
+    commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher",
+        watcherTimeout);
+    initializeSystemManagers(conf, configurator);
+
     // Authenticate SCM if security is enabled, this initialization can only
     // be done after the metadata store is initialized.
     if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
@@ -280,25 +323,19 @@
       securityProtocolServer = null;
     }
 
-    eventQueue = new EventQueue();
-    long watcherTimeout =
-        conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
-            HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-    commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher",
-        watcherTimeout);
-    initializeSystemManagers(conf, configurator);
-
     CloseContainerEventHandler closeContainerHandler =
-        new CloseContainerEventHandler(pipelineManager, containerManager);
+        new CloseContainerEventHandler(
+            pipelineManager, containerManager, scmContext);
     NodeReportHandler nodeReportHandler =
         new NodeReportHandler(scmNodeManager);
     PipelineReportHandler pipelineReportHandler =
-        new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf);
+        new PipelineReportHandler(
+            scmSafeModeManager, pipelineManager, scmContext, conf);
     CommandStatusReportHandler cmdStatusReportHandler =
         new CommandStatusReportHandler();
 
     NewNodeHandler newNodeHandler = new NewNodeHandler(pipelineManager,
-        scmDecommissionManager, conf);
+        scmDecommissionManager, conf, serviceManager);
     StaleNodeHandler staleNodeHandler =
         new StaleNodeHandler(scmNodeManager, pipelineManager, conf);
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
@@ -306,20 +343,21 @@
     StartDatanodeAdminHandler datanodeStartAdminHandler =
         new StartDatanodeAdminHandler(scmNodeManager, pipelineManager);
     NonHealthyToHealthyNodeHandler nonHealthyToHealthyNodeHandler =
-        new NonHealthyToHealthyNodeHandler(pipelineManager, conf);
+        new NonHealthyToHealthyNodeHandler(conf, serviceManager);
     ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
     PendingDeleteHandler pendingDeleteHandler =
         new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
 
     ContainerReportHandler containerReportHandler =
-        new ContainerReportHandler(scmNodeManager, containerManager, conf);
+        new ContainerReportHandler(
+            scmNodeManager, containerManager, scmContext, conf);
 
     IncrementalContainerReportHandler incrementalContainerReportHandler =
         new IncrementalContainerReportHandler(
-            scmNodeManager, containerManager);
+            scmNodeManager, containerManager, scmContext);
 
     PipelineActionHandler pipelineActionHandler =
-        new PipelineActionHandler(pipelineManager, conf);
+        new PipelineActionHandler(pipelineManager, scmContext, conf);
 
     scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
         .OZONE_ADMINISTRATORS);
@@ -351,15 +389,9 @@
     eventQueue
         .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
     eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS,
-        (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog());
+        (DeletedBlockLogImplV2) scmBlockManager.getDeletedBlockLog());
     eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionHandler);
     eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, clientProtocolServer);
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, scmBlockManager);
-    eventQueue
-        .addHandler(SCMEvents.DELAYED_SAFE_MODE_STATUS, replicationManager);
-    eventQueue
-        .addHandler(SCMEvents.DELAYED_SAFE_MODE_STATUS, pipelineManager);
 
     // Emit initial safe mode status, as now handlers are registered.
     scmSafeModeManager.emitSafeModeStatus();
@@ -373,6 +405,32 @@
   }
 
   /**
+   * Create an SCM instance based on the supplied configuration.
+   *
+   * @param conf        HDDS configuration
+   * @param configurator SCM configurator
+   * @return SCM instance
+   * @throws IOException, AuthenticationException
+   */
+  public static StorageContainerManager createSCM(
+      OzoneConfiguration conf, SCMConfigurator configurator)
+      throws IOException, AuthenticationException {
+    return new StorageContainerManager(conf, configurator);
+  }
+
+  /**
+   * Create an SCM instance based on the supplied configuration.
+   *
+   * @param conf        HDDS configuration
+   * @return SCM instance
+   * @throws IOException, AuthenticationException
+   */
+  public static StorageContainerManager createSCM(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
+    return createSCM(conf, new SCMConfigurator());
+  }
+
+  /**
    * This function initializes the following managers. If the configurator
    * specifies a value, we will use it, else we will use the default value.
    *
@@ -397,11 +455,39 @@
       clusterMap = new NetworkTopologyImpl(conf);
     }
 
+    if (configurator.getSCMHAManager() != null) {
+      scmHAManager = configurator.getSCMHAManager();
+    } else {
+      scmHAManager = new SCMHAManagerImpl(conf, this);
+    }
+
+    // inline upgrade for SequenceIdGenerator
+    SequenceIdGenerator.upgradeToSequenceId(scmMetadataStore);
+    // Distributed sequence id generator
+    sequenceIdGen = new SequenceIdGenerator(
+        conf, scmHAManager, scmMetadataStore.getSequenceIdTable());
+
+    if (configurator.getScmContext() != null) {
+      scmContext = configurator.getScmContext();
+    } else {
+      // When term equals SCMContext.INVALID_TERM, the isLeader() check
+      // and getTermOfLeader() will always pass.
+      long term = SCMHAUtils.isSCMHAEnabled(conf) ? 0 : SCMContext.INVALID_TERM;
+      // non-leader of term 0, in safe mode, preCheck not completed.
+      scmContext = new SCMContext.Builder()
+          .setLeader(false)
+          .setTerm(term)
+          .setIsInSafeMode(true)
+          .setIsPreCheckComplete(false)
+          .setSCM(this)
+          .build();
+    }
+
     if(configurator.getScmNodeManager() != null) {
       scmNodeManager = configurator.getScmNodeManager();
     } else {
       scmNodeManager = new SCMNodeManager(
-          conf, scmStorageConfig, eventQueue, clusterMap);
+          conf, scmStorageConfig, eventQueue, clusterMap, scmContext);
     }
 
     placementMetrics = SCMContainerPlacementMetrics.create();
@@ -413,19 +499,21 @@
       pipelineManager = configurator.getPipelineManager();
     } else {
       pipelineManager =
-          new SCMPipelineManager(conf, scmNodeManager,
+          PipelineManagerV2Impl.newPipelineManager(
+              conf,
+              scmHAManager,
+              scmNodeManager,
               scmMetadataStore.getPipelineTable(),
-              eventQueue);
+              eventQueue,
+              scmContext,
+              serviceManager);
     }
 
     if (configurator.getContainerManager() != null) {
       containerManager = configurator.getContainerManager();
     } else {
-      containerManager =
-          new SCMContainerManager(conf,
-              scmMetadataStore.getContainerTable(),
-              scmMetadataStore.getBatchHandler(),
-              pipelineManager);
+      containerManager = new ContainerManagerImpl(conf, scmHAManager,
+          sequenceIdGen, pipelineManager, scmMetadataStore.getContainerTable());
     }
 
     pipelineChoosePolicy = PipelineChoosePolicyFactory.getPolicy(conf);
@@ -438,10 +526,12 @@
       replicationManager = configurator.getReplicationManager();
     }  else {
       replicationManager = new ReplicationManager(
-          conf.getObject(ReplicationManagerConfiguration.class),
+          conf,
           containerManager,
           containerPlacementPolicy,
           eventQueue,
+          scmContext,
+          serviceManager,
           new LockManager<>(conf),
           scmNodeManager);
     }
@@ -449,7 +539,8 @@
       scmSafeModeManager = configurator.getScmSafeModeManager();
     } else {
       scmSafeModeManager = new SCMSafeModeManager(conf,
-          containerManager.getContainers(), pipelineManager, eventQueue);
+          containerManager.getContainers(),
+          pipelineManager, eventQueue, serviceManager, scmContext);
     }
     scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager,
         containerManager, eventQueue, replicationManager);
@@ -479,12 +570,16 @@
     certificateServer.init(new SecurityConfig(conf),
         CertificateServer.CAType.SELF_SIGNED_CA);
     securityProtocolServer = new SCMSecurityProtocolServer(conf,
-        certificateServer);
+        certificateServer, this);
 
     grpcTlsConfig = createTlsClientConfigForSCM(new SecurityConfig(conf),
             certificateServer);
   }
 
+  public CertificateServer getCertificateServer() {
+    return certificateServer;
+  }
+
   // For Internal gRPC client from SCM to DN with gRPC TLS
   static GrpcTlsConfig createTlsClientConfigForSCM(SecurityConfig conf,
       CertificateServer certificateServer) throws IOException {
@@ -551,7 +646,6 @@
     LOG.info("SCM login successful.");
   }
 
-
   /**
    * This function creates/initializes a certificate server as needed.
    * This function is idempotent, so calling this again and again after the
@@ -564,16 +658,22 @@
       String scmID) throws IOException {
     // TODO: Support Certificate Server loading via Class Name loader.
     // So it is easy to use different Certificate Servers if needed.
-    String subject = "scm@" + InetAddress.getLocalHost().getHostName();
+    String subject = SCM_ROOT_CA_PREFIX +
+        InetAddress.getLocalHost().getHostName();
     if(this.scmMetadataStore == null) {
       LOG.error("Cannot initialize Certificate Server without a valid meta " +
           "data layer.");
       throw new SCMException("Cannot initialize CA without a valid metadata " +
           "store", ResultCodes.SCM_NOT_INITIALIZED);
     }
-    SCMCertStore certStore = new SCMCertStore(this.scmMetadataStore,
-        getLastSequenceIdForCRL());
-    return new DefaultCAServer(subject, clusterID, scmID, certStore);
+
+    CertificateStore certStore =
+        new SCMCertStore.Builder().setMetadaStore(scmMetadataStore)
+            .setRatisServer(scmHAManager.getRatisServer())
+            .setCRLSequenceId(getLastSequenceIdForCRL()).build();
+
+    return new DefaultCAServer(subject, clusterID, scmID, certStore,
+        new DefaultProfile(), SCM_ROOT_CA_COMPONENT_NAME);
   }
 
   long getLastSequenceIdForCRL() throws IOException {
@@ -637,15 +737,69 @@
   }
 
   /**
-   * Create an SCM instance based on the supplied configuration.
+   * Routine to bootstrap the StorageContainerManager. This will connect to a
+   * running SCM instance which has valid cluster id and fetch the cluster id
+   * from there.
    *
-   * @param conf        HDDS configuration
-   * @return SCM instance
-   * @throws IOException, AuthenticationException
+   * TODO: once SCM HA security is enabled, CSR cerificates will be fetched from
+   * running scm leader instance as well.
+   *
+   * @param conf OzoneConfiguration
+   * @return true if SCM bootstrap is successful, false otherwise.
+   * @throws IOException if init fails due to I/O error
    */
-  public static StorageContainerManager createSCM(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    return new StorageContainerManager(conf);
+  public static boolean scmBootstrap(OzoneConfiguration conf)
+      throws IOException {
+    if (!SCMHAUtils.isSCMHAEnabled(conf)) {
+      LOG.error("Bootstrap is not supported without SCM HA.");
+      return false;
+    }
+    // The node here will try to fetch the cluster id from any of existing
+    // running SCM instances.
+    SCMHANodeDetails scmhaNodeDetails = SCMHANodeDetails.loadSCMHAConfig(conf);
+    String primordialSCM = SCMHAUtils.getPrimordialSCM(conf);
+    String selfNodeId = scmhaNodeDetails.getLocalNodeDetails().getNodeId();
+    if (primordialSCM != null && SCMHAUtils.isPrimordialSCM(conf, selfNodeId)) {
+      LOG.info(
+          "SCM bootstrap command can only be executed in non-Primordial SCM "
+              + "{}, self id {} "
+              + "Ignoring it.", primordialSCM, selfNodeId);
+      return true;
+    }
+    OzoneConfiguration config =
+        SCMHAUtils.removeSelfId(conf,
+            scmhaNodeDetails.getLocalNodeDetails().getNodeId());
+    final ScmInfo scmInfo = HAUtils.getScmInfo(config);
+    SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf);
+    final String persistedClusterId = scmStorageConfig.getClusterID();
+    final String fetchedId = scmInfo.getClusterId();
+    Preconditions.checkNotNull(fetchedId);
+    StorageState state = scmStorageConfig.getState();
+    if (state == StorageState.INITIALIZED) {
+      Preconditions.checkNotNull(scmStorageConfig.getScmId());
+      if (!fetchedId.equals(persistedClusterId)) {
+        LOG.error(
+            "Could not bootstrap as SCM is already initialized with cluster "
+                + "id {} but cluster id for existing leader SCM instance "
+                + "is {}", persistedClusterId, fetchedId);
+        return false;
+      }
+    } else {
+      try {
+        scmStorageConfig.setClusterId(fetchedId);
+        // It will write down the cluster Id fetched from already
+        // running SCM as well as the local SCM Id.
+
+        // SCM Node info containing hostname to scm Id mappings
+        // will be persisted into the version file once this node gets added
+        // to existing SCM ring post node regular start up.
+        scmStorageConfig.initialize();
+      } catch (IOException ioe) {
+        LOG.error("Could not initialize SCM version file", ioe);
+        return false;
+      }
+    }
+    return true;
   }
 
   /**
@@ -657,28 +811,51 @@
    */
   public static boolean scmInit(OzoneConfiguration conf,
       String clusterId) throws IOException {
+    checkSecurityAndSCMHAEnabled(conf);
     SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf);
     StorageState state = scmStorageConfig.getState();
+    final SCMHANodeDetails haDetails = SCMHANodeDetails.loadSCMHAConfig(conf);
+    String primordialSCM = SCMHAUtils.getPrimordialSCM(conf);
+    String selfNodeId = haDetails.getLocalNodeDetails().getNodeId();
+    if (primordialSCM != null && !SCMHAUtils
+        .isPrimordialSCM(conf, selfNodeId)) {
+      LOG.info(
+          "SCM init command can only be executed in Primordial SCM {}, "
+              + "self id {} "
+              + "Ignoring it.", primordialSCM, selfNodeId);
+      return true;
+    }
     if (state != StorageState.INITIALIZED) {
       try {
         if (clusterId != null && !clusterId.isEmpty()) {
+          // clusterId must be an UUID
+          Preconditions.checkNotNull(UUID.fromString(clusterId));
           scmStorageConfig.setClusterId(clusterId);
         }
         scmStorageConfig.initialize();
+        if (SCMHAUtils.isSCMHAEnabled(conf)) {
+          SCMRatisServerImpl.initialize(scmStorageConfig.getClusterID(),
+              scmStorageConfig.getScmId(), haDetails.getLocalNodeDetails(),
+              conf);
+        }
         LOG.info("SCM initialization succeeded. Current cluster id for sd={}"
-            + ";cid={};layoutVersion={}", scmStorageConfig.getStorageDir(),
-            scmStorageConfig.getClusterID(),
-            scmStorageConfig.getLayoutVersion());
+                + "; cid={}; layoutVersion={}; scmId={}",
+            scmStorageConfig.getStorageDir(), scmStorageConfig.getClusterID(),
+            scmStorageConfig.getLayoutVersion(), scmStorageConfig.getScmId());
         return true;
       } catch (IOException ioe) {
         LOG.error("Could not initialize SCM version file", ioe);
         return false;
       }
     } else {
+      clusterId = scmStorageConfig.getClusterID();
       LOG.info("SCM already initialized. Reusing existing cluster id for sd={}"
-          + ";cid={};layoutVersion={}", scmStorageConfig.getStorageDir(),
-          scmStorageConfig.getClusterID(),
-          scmStorageConfig.getLayoutVersion());
+              + ";cid={};layoutVersion={}", scmStorageConfig.getStorageDir(),
+          clusterId, scmStorageConfig.getLayoutVersion());
+      if (SCMHAUtils.isSCMHAEnabled(conf)) {
+        SCMRatisServerImpl.reinitialize(clusterId, scmStorageConfig.getScmId(),
+            haDetails.getLocalNodeDetails(), conf);
+      }
       return true;
     }
   }
@@ -766,7 +943,7 @@
   @VisibleForTesting
   public ContainerInfo getContainerInfo(long containerID) throws
       IOException {
-    return containerManager.getContainer(ContainerID.valueof(containerID));
+    return containerManager.getContainer(ContainerID.valueOf(containerID));
   }
 
   /**
@@ -796,6 +973,15 @@
   }
 
   @Override
+  public SCMNodeDetails getScmNodeDetails() {
+    return scmHANodeDetails.getLocalNodeDetails();
+  }
+
+  public SCMHANodeDetails getSCMHANodeDetails() {
+    return scmHANodeDetails;
+  }
+
+  @Override
   public String getDatanodeRpcPort() {
     InetSocketAddress addr = getDatanodeRpcAddress();
     return addr == null ? "0" : Integer.toString(addr.getPort());
@@ -812,6 +998,8 @@
           getClientRpcAddress()));
     }
 
+    scmHAManager.start();
+
     ms = HddsServerUtil
         .initializeMetrics(configuration, "StorageContainerManager");
 
@@ -856,7 +1044,6 @@
    */
   @Override
   public void stop() {
-
     try {
       LOG.info("Stopping Replication Manager Service.");
       replicationManager.stop();
@@ -946,6 +1133,13 @@
     if (jvmPauseMonitor != null) {
       jvmPauseMonitor.stop();
     }
+
+    try {
+      scmHAManager.shutdown();
+    } catch (Exception ex) {
+      LOG.error("SCM HA Manager stop failed", ex);
+    }
+
     IOUtils.cleanupWithLogger(LOG, containerManager);
     IOUtils.cleanupWithLogger(LOG, pipelineManager);
 
@@ -961,7 +1155,7 @@
 
     scmSafeModeManager.stop();
   }
-
+  
   /**
    * Wait until service has completed shutdown.
    */
@@ -1002,11 +1196,18 @@
   }
 
   /**
+   * Returns SCMHAManager.
+   */
+  public SCMHAManager getScmHAManager() {
+    return scmHAManager;
+  }
+
+  /**
    * Returns SCM container manager.
    */
   @VisibleForTesting
   @Override
-  public ContainerManager getContainerManager() {
+  public ContainerManagerV2 getContainerManager() {
     return containerManager;
   }
 
@@ -1049,6 +1250,23 @@
     return replicationManager;
   }
 
+  /**
+   * Check if the current scm is the leader and ready for accepting requests.
+   * @return - if the current scm is the leader and is ready.
+   */
+  public boolean checkLeader() {
+    // For NON-HA setup, the node will always be the leader
+    if (!SCMHAUtils.isSCMHAEnabled(configuration)) {
+      Preconditions.checkArgument(scmContext.isLeader());
+      return true;
+    } else {
+      // FOR HA setup, the node has to be the leader and ready to serve
+      // requests.
+      return scmContext.isLeader() && getScmHAManager().getRatisServer()
+          .getDivision().getInfo().isLeaderReady();
+    }
+  }
+
   public void checkAdminAccess(String remoteUser) throws IOException {
     if (remoteUser != null && !scmAdminUsernames.contains(remoteUser) &&
         !scmAdminUsernames.contains(OZONE_ADMINISTRATORS_WILDCARD)) {
@@ -1135,6 +1353,27 @@
   }
 
   /**
+   * Returns SCMContext.
+   */
+  public SCMContext getScmContext() {
+    return scmContext;
+  }
+
+  /**
+   * Returns SequenceIdGen.
+   */
+  public SequenceIdGenerator getSequenceIdGen() {
+    return sequenceIdGen;
+  }
+
+  /**
+   * Returns SCMServiceManager.
+   */
+  public SCMServiceManager getSCMServiceManager() {
+    return serviceManager;
+  }
+
+  /**
    * Force SCM out of safe mode.
    */
   public boolean exitSafeMode() {
@@ -1152,7 +1391,7 @@
     Map<String, Integer> nodeStateCount = new HashMap<>();
     for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) {
       nodeStateCount.put(state.toString(),
-          containerManager.getContainerCountByState(state));
+          containerManager.getContainers(state).size());
     }
     return nodeStateCount;
   }
@@ -1207,4 +1446,12 @@
   public String getClusterId() {
     return getScmStorageConfig().getClusterID();
   }
+
+  /**
+   * Return the node Id of this SCM.
+   * @return node Id.
+   */
+  public String getSCMNodeId() {
+    return scmHANodeDetails.getLocalNodeDetails().getNodeId();
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
index f30bfe3..4c7b693 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
@@ -113,6 +113,25 @@
   }
 
   /**
+   * This function implements a sub-command to allow the SCM to be
+   * initialized from the command line.
+   */
+  @CommandLine.Command(name = "--bootstrap",
+      customSynopsis = "ozone scm [global options] --bootstrap",
+      hidden = false,
+      description = "Bootstrap SCM if not already done",
+      mixinStandardHelpOptions = true,
+      versionProvider = HddsVersionProvider.class)
+  public void bootStrapScm()
+      throws Exception {
+    commonInit();
+    boolean result = receiver.bootStrap(conf);
+    if (!result) {
+      throw new IOException("scm bootstrap failed");
+    }
+  }
+
+  /**
    * This function is used by the command line to start the SCM.
    */
   private void startScm() throws Exception {
@@ -154,6 +173,12 @@
     }
 
     @Override
+    public boolean bootStrap(OzoneConfiguration conf)
+        throws IOException{
+      return StorageContainerManager.scmBootstrap(conf);
+    }
+
+    @Override
     public String generateClusterId() {
       return StorageInfo.newClusterID();
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 4039b5a..d4d11ff 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -17,23 +17,16 @@
  */
 package org.apache.hadoop.hdds.scm;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.UUID;
 
 import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer
     .NodeRegistrationContainerReport;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 
 /**
  * Stateless helper functions for Hdds tests.
@@ -74,24 +67,6 @@
         TestUtils.getContainerReports(containers));
   }
 
-  public static StorageContainerManager getScm(OzoneConfiguration conf)
-      throws IOException, AuthenticationException {
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if(scmStore.getState() != Storage.StorageState.INITIALIZED) {
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-    }
-    return StorageContainerManager.createSCM(conf);
-  }
-
   /**
    * Creates list of ContainerInfo.
    *
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
index 2c65ee8..6f8d020 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
@@ -150,19 +150,6 @@
   }
 
   /**
-   * getScmAddressForDataNodes should fail when OZONE_SCM_NAMES has
-   * multiple addresses.
-   */
-  @Test
-  public void testClientFailsWithMultipleScmNames() {
-    final String scmHost = "host123,host456";
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    thrown.expect(IllegalArgumentException.class);
-    HddsServerUtil.getScmAddressForDataNodes(conf);
-  }
-
-  /**
    * Test {@link ServerUtils#getScmDbDir}.
    */
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 42640f3..6aa4cfe 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -33,7 +33,10 @@
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -43,8 +46,6 @@
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -64,6 +65,7 @@
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.security.authentication.client
     .AuthenticationException;
@@ -430,7 +432,7 @@
   }
 
   public static org.apache.hadoop.hdds.scm.container.ContainerInfo
-      allocateContainer(ContainerManager containerManager)
+      allocateContainer(ContainerManagerV2 containerManager)
       throws IOException {
     return containerManager
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
@@ -438,8 +440,8 @@
 
   }
 
-  public static void closeContainer(ContainerManager containerManager,
-      ContainerID id) throws IOException {
+  public static void closeContainer(ContainerManagerV2 containerManager,
+      ContainerID id) throws IOException, InvalidStateTransitionException {
     containerManager.updateContainerState(
         id, HddsProtos.LifeCycleEvent.FINALIZE);
     containerManager.updateContainerState(
@@ -453,8 +455,8 @@
    * @param id
    * @throws IOException
    */
-  public static void quasiCloseContainer(ContainerManager containerManager,
-      ContainerID id) throws IOException {
+  public static void quasiCloseContainer(ContainerManagerV2 containerManager,
+      ContainerID id) throws IOException, InvalidStateTransitionException {
     containerManager.updateContainerState(
         id, HddsProtos.LifeCycleEvent.FINALIZE);
     containerManager.updateContainerState(
@@ -464,6 +466,24 @@
 
   /**
    * Construct and returns StorageContainerManager instance using the given
+   * configuration.
+   *
+   * @param conf OzoneConfiguration
+   * @return StorageContainerManager instance
+   * @throws IOException
+   * @throws AuthenticationException
+   */
+  public static StorageContainerManager getScmSimple(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
+    SCMConfigurator configurator = new SCMConfigurator();
+    // The default behaviour whether ratis will be enabled or not
+    // in SCM will be inferred from ozone-default.xml.
+    // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+    return StorageContainerManager.createSCM(conf, configurator);
+  }
+
+  /**
+   * Construct and returns StorageContainerManager instance using the given
    * configuration. The ports used by this StorageContainerManager are
    * randomly selected from free ports available.
    *
@@ -474,7 +494,10 @@
    */
   public static StorageContainerManager getScm(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
-    return getScm(conf, new SCMConfigurator());
+    SCMConfigurator configurator = new SCMConfigurator();
+    configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));
+    configurator.setScmContext(SCMContext.emptyContext());
+    return getScm(conf, configurator);
   }
 
   /**
@@ -504,7 +527,7 @@
       // writes the version file properties
       scmStore.initialize();
     }
-    return new StorageContainerManager(conf, configurator);
+    return StorageContainerManager.createSCM(conf, configurator);
   }
 
   public static ContainerInfo getContainer(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index a202647..8027cb6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -34,11 +34,17 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -48,8 +54,9 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
 import org.apache.hadoop.hdds.scm.server.SCMConfigurator;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -76,14 +83,18 @@
  */
 public class TestBlockManager {
   private StorageContainerManager scm;
-  private SCMContainerManager mapping;
+  private ContainerManagerV2 mapping;
   private MockNodeManager nodeManager;
-  private SCMPipelineManager pipelineManager;
+  private PipelineManagerV2Impl pipelineManager;
   private BlockManagerImpl blockManager;
+  private SCMHAManager scmHAManager;
+  private SequenceIdGenerator sequenceIdGen;
   private static final long DEFAULT_BLOCK_SIZE = 128 * MB;
   private HddsProtos.ReplicationFactor factor;
   private HddsProtos.ReplicationType type;
   private EventQueue eventQueue;
+  private SCMContext scmContext;
+  private SCMServiceManager serviceManager;
   private int numContainerPerOwnerInPipeline;
   private OzoneConfiguration conf;
 
@@ -107,30 +118,45 @@
     conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 5,
         TimeUnit.SECONDS);
 
-    // Override the default Node Manager in SCM with this Mock Node Manager.
+    // Override the default Node Manager and SCMHAManager
+    // in SCM with the Mock one.
     nodeManager = new MockNodeManager(true, 10);
+    scmHAManager = MockSCMHAManager.getInstance(true);
+
     eventQueue = new EventQueue();
+    scmContext = SCMContext.emptyContext();
+    serviceManager = new SCMServiceManager();
 
     scmMetadataStore = new SCMMetadataStoreImpl(conf);
     scmMetadataStore.start(conf);
+
+    sequenceIdGen = new SequenceIdGenerator(
+        conf, scmHAManager, scmMetadataStore.getSequenceIdTable());
+
     pipelineManager =
-        new SCMPipelineManager(conf, nodeManager,
+        PipelineManagerV2Impl.newPipelineManager(
+            conf,
+            scmHAManager,
+            nodeManager,
             scmMetadataStore.getPipelineTable(),
-            eventQueue);
-    pipelineManager.allowPipelineCreation();
+            eventQueue,
+            scmContext,
+            serviceManager);
 
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(nodeManager,
             pipelineManager.getStateManager(), conf, eventQueue);
     pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
         mockRatisProvider);
-    SCMContainerManager containerManager =
-        new SCMContainerManager(conf,
-            scmMetadataStore.getContainerTable(),
-            scmMetadataStore.getStore(),
-            pipelineManager);
+    ContainerManagerV2 containerManager =
+        new ContainerManagerImpl(conf,
+            scmHAManager,
+            sequenceIdGen,
+            pipelineManager,
+            scmMetadataStore.getContainerTable());
     SCMSafeModeManager safeModeManager = new SCMSafeModeManager(conf,
-        containerManager.getContainers(), pipelineManager, eventQueue) {
+        containerManager.getContainers(),
+        pipelineManager, eventQueue, serviceManager, scmContext) {
       @Override
       public void emitSafeModeStatus() {
         // skip
@@ -142,22 +168,22 @@
     configurator.setContainerManager(containerManager);
     configurator.setScmSafeModeManager(safeModeManager);
     configurator.setMetadataStore(scmMetadataStore);
+    configurator.setSCMHAManager(scmHAManager);
+    configurator.setScmContext(scmContext);
     scm = TestUtils.getScm(conf, configurator);
 
     // Initialize these fields so that the tests can pass.
-    mapping = (SCMContainerManager) scm.getContainerManager();
+    mapping = scm.getContainerManager();
     blockManager = (BlockManagerImpl) scm.getScmBlockManager();
     DatanodeCommandHandler handler = new DatanodeCommandHandler();
     eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, handler);
     CloseContainerEventHandler closeContainerHandler =
-        new CloseContainerEventHandler(pipelineManager, mapping);
+        new CloseContainerEventHandler(pipelineManager, mapping, scmContext);
     eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
     factor = HddsProtos.ReplicationFactor.THREE;
     type = HddsProtos.ReplicationType.RATIS;
 
-
-    blockManager.onMessage(
-        new SCMSafeModeManager.SafeModeStatus(false, false), null);
+    scm.getScmContext().updateSafeModeStatus(new SafeModeStatus(false, true));
   }
 
   @After
@@ -347,20 +373,20 @@
       CompletableFuture
               .allOf(futureList.toArray(
                       new CompletableFuture[futureList.size()])).get();
-      Assert.assertTrue(
-              pipelineManager.getPipelines(type).size() == 1);
+      Assert.assertEquals(1,
+              pipelineManager.getPipelines(type).size());
       Pipeline pipeline = pipelineManager.getPipelines(type).get(0);
       // total no of containers to be created will be number of healthy
       // volumes * number of numContainerPerOwnerInPipeline which is equal to
       // the thread count
-      Assert.assertTrue(threadCount == pipelineManager.
+      Assert.assertEquals(threadCount, pipelineManager.
               getNumberOfContainers(pipeline.getId()));
-      Assert.assertTrue(
-              allocatedBlockMap.size() == threadCount);
-      Assert.assertTrue(allocatedBlockMap.
-              values().size() == threadCount);
+      Assert.assertEquals(threadCount,
+              allocatedBlockMap.size());
+      Assert.assertEquals(threadCount, allocatedBlockMap.
+              values().size());
       allocatedBlockMap.values().stream().forEach(v -> {
-        Assert.assertTrue(v.size() == 1);
+        Assert.assertEquals(1, v.size());
       });
     } catch (Exception e) {
       Assert.fail("testAllocateBlockInParallel failed");
@@ -443,8 +469,8 @@
 
   @Test
   public void testAllocateBlockFailureInSafeMode() throws Exception {
-    blockManager.onMessage(
-        new SCMSafeModeManager.SafeModeStatus(true, true), null);
+    scm.getScmContext().updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, true));
     // Test1: In safe mode expect an SCMException.
     thrown.expectMessage("SafeModePrecheck failed for "
         + "allocateBlock");
@@ -555,7 +581,7 @@
   public void testBlockAllocationWithNoAvailablePipelines()
       throws IOException, TimeoutException, InterruptedException {
     for (Pipeline pipeline : pipelineManager.getPipelines()) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+      pipelineManager.closePipeline(pipeline, false);
     }
     Assert.assertEquals(0, pipelineManager.getPipelines(type, factor).size());
     Assert.assertNotNull(blockManager
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index df5126a..edf8e0c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -22,12 +22,14 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.ha.SCMHADBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHADBTransactionBuffer;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -75,25 +77,34 @@
  */
 public class TestDeletedBlockLog {
 
-  private DeletedBlockLogImpl deletedBlockLog;
+  private  DeletedBlockLogImplV2 deletedBlockLog;
   private static final int BLOCKS_PER_TXN = 5;
   private OzoneConfiguration conf;
   private File testDir;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private StorageContainerManager scm;
   private List<DatanodeDetails> dnList;
+  private SCMHADBTransactionBuffer scmHADBTransactionBuffer;
 
   @Before
   public void setup() throws Exception {
     testDir = GenericTestUtils.getTestDir(
         TestDeletedBlockLog.class.getSimpleName());
     conf = new OzoneConfiguration();
+    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     scm = TestUtils.getScm(conf);
-    containerManager = Mockito.mock(SCMContainerManager.class);
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager,
-        scm.getScmMetadataStore());
+    containerManager = Mockito.mock(ContainerManagerV2.class);
+    scmHADBTransactionBuffer =
+        new MockSCMHADBTransactionBuffer(scm.getScmMetadataStore().getStore());
+    deletedBlockLog = new DeletedBlockLogImplV2(conf,
+        containerManager,
+        scm.getScmHAManager().getRatisServer(),
+        scm.getScmMetadataStore().getDeletedBlocksTXTable(),
+        scmHADBTransactionBuffer,
+        scm.getScmContext(),
+        scm.getSequenceIdGen());
     dnList = new ArrayList<>(3);
     setupContainerManager();
   }
@@ -153,31 +164,45 @@
     return blockMap;
   }
 
+  private void addTransactions(Map<Long, List<Long>> containerBlocksMap)
+      throws IOException {
+    deletedBlockLog.addTransactions(containerBlocksMap);
+    scmHADBTransactionBuffer.flush();
+  }
+
+  private void incrementCount(List<Long> txIDs) throws IOException {
+    deletedBlockLog.incrementCount(txIDs);
+    scmHADBTransactionBuffer.flush();
+  }
+
   private void commitTransactions(
       List<DeleteBlockTransactionResult> transactionResults,
-      DatanodeDetails... dns) {
+      DatanodeDetails... dns) throws IOException {
     for (DatanodeDetails dnDetails : dns) {
       deletedBlockLog
           .commitTransactions(transactionResults, dnDetails.getUuid());
     }
+    scmHADBTransactionBuffer.flush();
   }
 
   private void commitTransactions(
-      List<DeleteBlockTransactionResult> transactionResults) {
+      List<DeleteBlockTransactionResult> transactionResults)
+      throws IOException {
     commitTransactions(transactionResults,
         dnList.toArray(new DatanodeDetails[3]));
   }
 
   private void commitTransactions(
       Collection<DeletedBlocksTransaction> deletedBlocksTransactions,
-      DatanodeDetails... dns) {
+      DatanodeDetails... dns) throws IOException {
     commitTransactions(deletedBlocksTransactions.stream()
         .map(this::createDeleteBlockTransactionResult)
         .collect(Collectors.toList()), dns);
   }
 
   private void commitTransactions(
-      Collection<DeletedBlocksTransaction> deletedBlocksTransactions) {
+      Collection<DeletedBlocksTransaction> deletedBlocksTransactions)
+      throws IOException {
     commitTransactions(deletedBlocksTransactions.stream()
         .map(this::createDeleteBlockTransactionResult)
         .collect(Collectors.toList()));
@@ -208,9 +233,7 @@
     int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
 
     // Create 30 TXs in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(30).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
+    addTransactions(generateData(30));
 
     // This will return all TXs, total num 30.
     List<DeletedBlocksTransaction> blocks =
@@ -219,12 +242,12 @@
         .collect(Collectors.toList());
 
     for (int i = 0; i < maxRetry; i++) {
-      deletedBlockLog.incrementCount(txIDs);
+      incrementCount(txIDs);
     }
 
     // Increment another time so it exceed the maxRetry.
     // On this call, count will be set to -1 which means TX eventually fails.
-    deletedBlockLog.incrementCount(txIDs);
+    incrementCount(txIDs);
     blocks = getTransactions(40 * BLOCKS_PER_TXN);
     for (DeletedBlocksTransaction block : blocks) {
       Assert.assertEquals(-1, block.getCount());
@@ -236,57 +259,8 @@
   }
 
   @Test
-  public void testIncrementCountLessFrequentWritingToDB() throws Exception {
-    OzoneConfiguration testConf = OzoneConfiguration.of(conf);
-    testConf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 120);
-
-    deletedBlockLog = new DeletedBlockLogImpl(testConf, containerManager,
-        scm.getScmMetadataStore());
-
-    for (Map.Entry<Long, List<Long>> entry :
-        generateData(1).entrySet()) {
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(40 * BLOCKS_PER_TXN);
-    List<Long> txIDs = blocks.stream().map(DeletedBlocksTransaction::getTxID)
-        .collect(Collectors.toList());
-
-    for (int i = 0; i < 50; i++) {
-      deletedBlockLog.incrementCount(txIDs);
-    }
-    blocks = getTransactions(40 * BLOCKS_PER_TXN);
-    for (DeletedBlocksTransaction block : blocks) {
-      // block count should not be updated as there are only 50 retries.
-      Assert.assertEquals(0, block.getCount());
-    }
-
-    for (int i = 0; i < 60; i++) {
-      deletedBlockLog.incrementCount(txIDs);
-    }
-    blocks = getTransactions(40 * BLOCKS_PER_TXN);
-    for (DeletedBlocksTransaction block : blocks) {
-      // block count should be updated to 100 as there are already 110 retries.
-      Assert.assertEquals(100, block.getCount());
-    }
-
-    for (int i = 0; i < 50; i++) {
-      deletedBlockLog.incrementCount(txIDs);
-    }
-    blocks = getTransactions(40 * BLOCKS_PER_TXN);
-    for (DeletedBlocksTransaction block : blocks) {
-      // block count should be updated to -1 as retry count exceeds maxRetry
-      // (i.e. 160 > maxRetry which is 120).
-      Assert.assertEquals(-1, block.getCount());
-    }
-  }
-
-  @Test
   public void testCommitTransactions() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
+    addTransactions(generateData(50));
     List<DeletedBlocksTransaction> blocks =
         getTransactions(20 * BLOCKS_PER_TXN);
     // Add an invalid txn.
@@ -320,10 +294,7 @@
     for (int i = 0; i < 100; i++) {
       int state = random.nextInt(4);
       if (state == 0) {
-        for (Map.Entry<Long, List<Long>> entry :
-            generateData(10).entrySet()){
-          deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-        }
+        addTransactions(generateData(10));
         added += 10;
       } else if (state == 1) {
         blocks = getTransactions(20);
@@ -331,7 +302,7 @@
         for (DeletedBlocksTransaction block : blocks) {
           txIDs.add(block.getTxID());
         }
-        deletedBlockLog.incrementCount(txIDs);
+        incrementCount(txIDs);
       } else if (state == 2) {
         commitTransactions(blocks);
         committed += blocks.size();
@@ -340,7 +311,7 @@
         // verify the number of added and committed.
         try (TableIterator<Long,
             ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
-            deletedBlockLog.getIterator()) {
+            scm.getScmMetadataStore().getDeletedBlocksTXTable().iterator()) {
           AtomicInteger count = new AtomicInteger();
           iter.forEachRemaining((keyValue) -> count.incrementAndGet());
           Assert.assertEquals(added, count.get() + committed);
@@ -353,14 +324,17 @@
 
   @Test
   public void testPersistence() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
+    addTransactions(generateData(50));
     // close db and reopen it again to make sure
     // transactions are stored persistently.
     deletedBlockLog.close();
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager,
-        scm.getScmMetadataStore());
+    deletedBlockLog = new DeletedBlockLogImplV2(conf,
+        containerManager,
+        scm.getScmHAManager().getRatisServer(),
+        scm.getScmMetadataStore().getDeletedBlocksTXTable(),
+        scmHADBTransactionBuffer,
+        scm.getScmContext(),
+        scm.getSequenceIdGen());
     List<DeletedBlocksTransaction> blocks =
         getTransactions(BLOCKS_PER_TXN * 10);
     Assert.assertEquals(10, blocks.size());
@@ -372,11 +346,16 @@
     // close db and reopen it again to make sure
     // currentTxnID = 50
     deletedBlockLog.close();
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager,
-        scm.getScmMetadataStore());
+    new DeletedBlockLogImplV2(conf,
+        containerManager,
+        scm.getScmHAManager().getRatisServer(),
+        scm.getScmMetadataStore().getDeletedBlocksTXTable(),
+        scmHADBTransactionBuffer,
+        scm.getScmContext(),
+        scm.getSequenceIdGen());
     blocks = getTransactions(BLOCKS_PER_TXN * 40);
     Assert.assertEquals(0, blocks.size());
-    Assert.assertEquals((long)deletedBlockLog.getCurrentTXID(), 50L);
+    //Assert.assertEquals((long)deletedBlockLog.getCurrentTXID(), 50L);
   }
 
   @Test
@@ -389,10 +368,11 @@
     long containerID;
 
     // Creates {TXNum} TX in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(txNum).entrySet()) {
+    Map<Long, List<Long>> deletedBlocks = generateData(txNum);
+    addTransactions(deletedBlocks);
+    for (Map.Entry<Long, List<Long>> entry :deletedBlocks.entrySet()) {
       count++;
       containerID = entry.getKey();
-      deletedBlockLog.addTransaction(containerID, entry.getValue());
 
       if (count % 2 == 0) {
         mockContainerInfo(containerID, dnId1);
@@ -415,7 +395,9 @@
     builder.setTxID(11);
     builder.setContainerID(containerID);
     builder.setCount(0);
-    deletedBlockLog.addTransaction(containerID, new LinkedList<>());
+    Map<Long, List<Long>> deletedBlocksMap = new HashMap<>();
+    deletedBlocksMap.put(containerID, new LinkedList<>());
+    addTransactions(deletedBlocksMap);
 
     // get should return two transactions for the same container
     blocks = getTransactions(txNum);
@@ -434,13 +416,14 @@
             .build();
 
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipelineID(pipeline.getId())
+    builder.setContainerID(containerID)
+        .setPipelineID(pipeline.getId())
         .setReplicationType(pipeline.getType())
         .setReplicationFactor(pipeline.getFactor());
 
     ContainerInfo containerInfo = builder.build();
     Mockito.doReturn(containerInfo).when(containerManager)
-        .getContainer(ContainerID.valueof(containerID));
+        .getContainer(ContainerID.valueOf(containerID));
 
     final Set<ContainerReplica> replicaSet = dns.stream()
         .map(datanodeDetails -> ContainerReplica.newBuilder()
@@ -450,7 +433,7 @@
             .build())
         .collect(Collectors.toSet());
     when(containerManager.getContainerReplicas(
-        ContainerID.valueof(containerID)))
+        ContainerID.valueOf(containerID)))
         .thenReturn(replicaSet);
   }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 064d24e..a594685 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -28,11 +28,17 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
 import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -55,12 +61,15 @@
 
   private static OzoneConfiguration configuration;
   private static MockNodeManager nodeManager;
-  private static SCMPipelineManager pipelineManager;
-  private static SCMContainerManager containerManager;
+  private static PipelineManagerV2Impl pipelineManager;
+  private static ContainerManagerV2 containerManager;
   private static long size;
   private static File testDir;
   private static EventQueue eventQueue;
+  private static SCMContext scmContext;
   private static SCMMetadataStore scmMetadataStore;
+  private static SCMHAManager scmhaManager;
+  private static SequenceIdGenerator sequenceIdGen;
 
   @BeforeClass
   public static void setUp() throws Exception {
@@ -74,27 +83,41 @@
     configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 16);
     nodeManager = new MockNodeManager(true, 10);
     eventQueue = new EventQueue();
+    scmContext = SCMContext.emptyContext();
     scmMetadataStore = new SCMMetadataStoreImpl(configuration);
+    scmhaManager = MockSCMHAManager.getInstance(true);
+    sequenceIdGen = new SequenceIdGenerator(
+        configuration, scmhaManager, scmMetadataStore.getSequenceIdTable());
+
+    SCMServiceManager serviceManager = new SCMServiceManager();
 
     pipelineManager =
-        new SCMPipelineManager(configuration, nodeManager,
-            scmMetadataStore.getPipelineTable(), eventQueue);
-    pipelineManager.allowPipelineCreation();
+        PipelineManagerV2Impl.newPipelineManager(
+            configuration,
+            scmhaManager,
+            nodeManager,
+            scmMetadataStore.getPipelineTable(),
+            eventQueue,
+            scmContext,
+            serviceManager);
+
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(nodeManager,
             pipelineManager.getStateManager(), configuration, eventQueue);
     pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
         mockRatisProvider);
-    containerManager = new SCMContainerManager(
-            configuration,
-            scmMetadataStore.getContainerTable(),
-            scmMetadataStore.getStore(),
-            pipelineManager);
-    pipelineManager.triggerPipelineCreation();
+    containerManager = new ContainerManagerImpl(configuration,
+        scmhaManager,
+        sequenceIdGen,
+        pipelineManager,
+        scmMetadataStore.getContainerTable());
+
+    // trigger BackgroundPipelineCreator to take effect.
+    serviceManager.notifyEventTriggered(Event.PRE_CHECK_COMPLETED);
+
     eventQueue.addHandler(CLOSE_CONTAINER,
         new CloseContainerEventHandler(
-                pipelineManager,
-                containerManager));
+            pipelineManager, containerManager, scmContext));
     eventQueue.addHandler(DATANODE_COMMAND, nodeManager);
     // Move all pipelines created by background from ALLOCATED to OPEN state
     Thread.sleep(2000);
@@ -120,7 +143,7 @@
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
     eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(Math.abs(RandomUtils.nextInt())));
+        ContainerID.valueOf(Math.abs(RandomUtils.nextInt())));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
         .contains("Close container Event triggered for container"));
@@ -132,7 +155,7 @@
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
     eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(id));
+        ContainerID.valueOf(id));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
         .contains("Failed to close the container"));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
index 3434825..09b51f0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
@@ -61,7 +61,7 @@
     queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
     queue.processAll(1000L);
     verify(closeContainerEventHandler, times(1))
-        .onMessage(ContainerID.valueof(1L), queue);
+        .onMessage(ContainerID.valueOf(1L), queue);
 
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
new file mode 100644
index 0000000..2ce3fb0
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import java.io.File;
+import java.util.UUID;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.pipeline.MockPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+
+
+/**
+ * Tests to verify the functionality of ContainerManager.
+ */
+public class TestContainerManagerImpl {
+
+  private File testDir;
+  private DBStore dbStore;
+  private ContainerManagerV2 containerManager;
+  private SCMHAManager scmhaManager;
+  private SequenceIdGenerator sequenceIdGen;
+
+  @Before
+  public void setUp() throws Exception {
+    final OzoneConfiguration conf = SCMTestUtils.getConf();
+    testDir = GenericTestUtils.getTestDir(
+        TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+    dbStore = DBStoreBuilder.createDBStore(
+        conf, new SCMDBDefinition());
+    scmhaManager = MockSCMHAManager.getInstance(true);
+    sequenceIdGen = new SequenceIdGenerator(
+        conf, scmhaManager, SCMDBDefinition.SEQUENCE_ID.getTable(dbStore));
+    final PipelineManager pipelineManager = MockPipelineManager.getInstance();
+    pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE);
+    containerManager = new ContainerManagerImpl(conf,
+        scmhaManager, sequenceIdGen, pipelineManager,
+        SCMDBDefinition.CONTAINERS.getTable(dbStore));
+  }
+
+  @After
+  public void cleanup() throws Exception {
+    if(containerManager != null) {
+      containerManager.close();
+    }
+
+    if (dbStore != null) {
+      dbStore.close();
+    }
+
+    FileUtil.fullyDelete(testDir);
+  }
+
+  @Test
+  public void testAllocateContainer() throws Exception {
+    Assert.assertTrue(
+        containerManager.getContainers().isEmpty());
+    final ContainerInfo container = containerManager.allocateContainer(
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, "admin");
+    Assert.assertEquals(1, containerManager.getContainers().size());
+    Assert.assertNotNull(containerManager.getContainer(
+        container.containerID()));
+  }
+
+  @Test
+  public void testUpdateContainerState() throws Exception {
+    final ContainerInfo container = containerManager.allocateContainer(
+        HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE, "admin");
+    final ContainerID cid = container.containerID();
+    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
+        containerManager.getContainer(cid).getState());
+    containerManager.updateContainerState(cid,
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
+        containerManager.getContainer(cid).getState());
+    containerManager.updateContainerState(cid,
+        HddsProtos.LifeCycleEvent.QUASI_CLOSE);
+    Assert.assertEquals(HddsProtos.LifeCycleState.QUASI_CLOSED,
+        containerManager.getContainer(cid).getState());
+    containerManager.updateContainerState(cid,
+        HddsProtos.LifeCycleEvent.FORCE_CLOSE);
+    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED,
+        containerManager.getContainer(cid).getState());
+  }
+
+  @Test
+  public void testGetContainers() throws Exception{
+    Assert.assertTrue(
+        containerManager.getContainers().isEmpty());
+
+    ContainerID[] cidArray = new ContainerID[10];
+    for(int i = 0; i < 10; i++){
+      ContainerInfo container = containerManager.allocateContainer(
+          HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE, "admin");
+      cidArray[i] = container.containerID();
+    }
+
+    Assert.assertEquals(10,
+        containerManager.getContainers(cidArray[0], 10).size());
+    Assert.assertEquals(10,
+        containerManager.getContainers(cidArray[0], 100).size());
+
+    containerManager.updateContainerState(cidArray[0],
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    Assert.assertEquals(9,
+        containerManager.getContainers(HddsProtos.LifeCycleState.OPEN).size());
+    Assert.assertEquals(1, containerManager
+        .getContainers(HddsProtos.LifeCycleState.CLOSING).size());
+    containerManager.updateContainerState(cidArray[1],
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    Assert.assertEquals(8,
+        containerManager.getContainers(HddsProtos.LifeCycleState.OPEN).size());
+    Assert.assertEquals(2, containerManager
+        .getContainers(HddsProtos.LifeCycleState.CLOSING).size());
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index cc9e49f..b02d518 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -54,15 +55,15 @@
 public class TestContainerReportHandler {
 
   private NodeManager nodeManager;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
 
   @Before
-  public void setup() throws IOException {
+  public void setup() throws IOException, InvalidStateTransitionException {
     final ConfigurationSource conf = new OzoneConfiguration();
     this.nodeManager = new MockNodeManager(true, 10);
-    this.containerManager = Mockito.mock(ContainerManager.class);
+    this.containerManager = Mockito.mock(ContainerManagerV2.class);
     this.containerStateManager = new ContainerStateManager(conf);
     this.publisher = Mockito.mock(EventPublisher.class);
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 859eef7..b8bae22 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -107,7 +106,7 @@
 
   private ContainerInfo allocateContainer() throws IOException {
 
-    PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class);
+    PipelineManager pipelineManager = Mockito.mock(PipelineManager.class);
 
     Pipeline pipeline =
         Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED)
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 1af2f73..7c0c1ec 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -26,6 +26,7 @@
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -35,6 +36,7 @@
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -58,28 +60,28 @@
 public class TestIncrementalContainerReportHandler {
 
   private NodeManager nodeManager;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
+  private SCMContext scmContext = SCMContext.emptyContext();
 
   @Before
-  public void setup() throws IOException {
+  public void setup() throws IOException, InvalidStateTransitionException {
     final OzoneConfiguration conf = new OzoneConfiguration();
     final String path =
         GenericTestUtils.getTempPath(UUID.randomUUID().toString());
     Path scmPath = Paths.get(path, "scm-meta");
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-    this.containerManager = Mockito.mock(ContainerManager.class);
+    this.containerManager = Mockito.mock(ContainerManagerV2.class);
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
     SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
-    this.nodeManager =
-        new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
+    this.nodeManager = new SCMNodeManager(
+        conf, storageConfig, eventQueue, clusterMap, scmContext);
 
     this.containerStateManager = new ContainerStateManager(conf);
     this.publisher = Mockito.mock(EventPublisher.class);
 
-
     Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
             .getContainer((ContainerID)invocation.getArguments()[0]));
@@ -118,7 +120,8 @@
   @Test
   public void testClosingToClosed() throws IOException {
     final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
+        new IncrementalContainerReportHandler(
+            nodeManager, containerManager, scmContext);
     final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
@@ -155,7 +158,8 @@
   @Test
   public void testClosingToQuasiClosed() throws IOException {
     final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
+        new IncrementalContainerReportHandler(
+            nodeManager, containerManager, scmContext);
     final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
@@ -193,7 +197,8 @@
   @Test
   public void testQuasiClosedToClosed() throws IOException {
     final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
+        new IncrementalContainerReportHandler(
+            nodeManager, containerManager, scmContext);
     final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
@@ -234,7 +239,8 @@
   @Test
   public void testDeleteContainer() throws IOException {
     final IncrementalContainerReportHandler reportHandler =
-        new IncrementalContainerReportHandler(nodeManager, containerManager);
+        new IncrementalContainerReportHandler(
+            nodeManager, containerManager, scmContext);
     final ContainerInfo container = getContainer(LifeCycleState.CLOSED);
     final DatanodeDetails datanodeOne = randomDatanodeDetails();
     final DatanodeDetails datanodeTwo = randomDatanodeDetails();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
index 7983bcd..ce5c33f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import com.google.common.primitives.Longs;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -33,6 +33,8 @@
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -57,6 +59,7 @@
 import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
@@ -86,15 +89,19 @@
   private EventQueue eventQueue;
   private DatanodeCommandHandler datanodeCommandHandler;
   private SimpleMockNodeManager nodeManager;
-  private ContainerManager containerManager;
-  private ConfigurationSource conf;
+  private ContainerManagerV2 containerManager;
+  private OzoneConfiguration conf;
   private SCMNodeManager scmNodeManager;
 
   @Before
   public void setup()
       throws IOException, InterruptedException, NodeNotFoundException {
     conf = new OzoneConfiguration();
-    containerManager = Mockito.mock(ContainerManager.class);
+    conf.setTimeDuration(
+        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
+        0, TimeUnit.SECONDS);
+
+    containerManager = Mockito.mock(ContainerManagerV2.class);
     nodeManager = new SimpleMockNodeManager();
     eventQueue = new EventQueue();
     containerStateManager = new ContainerStateManager(conf);
@@ -102,8 +109,15 @@
     datanodeCommandHandler = new DatanodeCommandHandler();
     eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler);
 
-    Mockito.when(containerManager.getContainerIDs())
-        .thenAnswer(invocation -> containerStateManager.getAllContainerIDs());
+    Mockito.when(containerManager.getContainers())
+        .thenAnswer(invocation -> {
+          Set<ContainerID> ids = containerStateManager.getAllContainerIDs();
+          List<ContainerInfo> containers = new ArrayList<>();
+          for (ContainerID id : ids) {
+            containers.add(containerStateManager.getContainer(id));
+          }
+          return containers;
+        });
 
     Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
         .thenAnswer(invocation -> containerStateManager
@@ -139,28 +153,43 @@
         Mockito.any(DatanodeDetails.class)))
         .thenReturn(NodeStatus.inServiceHealthy());
 
+    SCMServiceManager serviceManager = new SCMServiceManager();
+
     replicationManager = new ReplicationManager(
-        new ReplicationManagerConfiguration(),
+        conf,
         containerManager,
         containerPlacementPolicy,
         eventQueue,
+        SCMContext.emptyContext(),
+        serviceManager,
         new LockManager<>(conf),
         nodeManager);
-    replicationManager.start();
+
+    serviceManager.notifyStatusChanged();
     Thread.sleep(100L);
   }
 
   private void createReplicationManager(ReplicationManagerConfiguration rmConf)
       throws InterruptedException {
+    OzoneConfiguration config = new OzoneConfiguration();
+    config.setTimeDuration(
+        HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
+        0, TimeUnit.SECONDS);
+    config.setFromObject(rmConf);
+
+    SCMServiceManager serviceManager = new SCMServiceManager();
+
     replicationManager = new ReplicationManager(
-        rmConf,
+        config,
         containerManager,
         containerPlacementPolicy,
         eventQueue,
-        new LockManager<ContainerID>(conf),
+        SCMContext.emptyContext(),
+        serviceManager,
+        new LockManager<ContainerID>(config),
         nodeManager);
 
-    replicationManager.start();
+    serviceManager.notifyStatusChanged();
     Thread.sleep(100L);
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index 12c62a9..8f9bc5d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -42,10 +42,13 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -66,7 +69,7 @@
 public class TestSCMContainerManager {
   private static SCMContainerManager containerManager;
   private static MockNodeManager nodeManager;
-  private static SCMPipelineManager pipelineManager;
+  private static PipelineManagerV2Impl pipelineManager;
   private static File testDir;
   private static XceiverClientManager xceiverClientManager;
   private static Random random;
@@ -92,10 +95,14 @@
     }
     nodeManager = new MockNodeManager(true, 10);
     SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf);
-    pipelineManager =
-        new SCMPipelineManager(conf, nodeManager,
-            scmMetadataStore.getPipelineTable(), new EventQueue());
-    pipelineManager.allowPipelineCreation();
+    pipelineManager = PipelineManagerV2Impl.newPipelineManager(
+        conf,
+        MockSCMHAManager.getInstance(true),
+        nodeManager,
+        scmMetadataStore.getPipelineTable(),
+        new EventQueue(),
+        SCMContext.emptyContext(),
+        new SCMServiceManager());
     containerManager = new SCMContainerManager(conf,
         scmMetadataStore.getContainerTable(),
         scmMetadataStore.getStore(),
@@ -283,7 +290,7 @@
   @Test
   public void testgetNoneExistentContainer() {
     try {
-      containerManager.getContainer(ContainerID.valueof(
+      containerManager.getContainer(ContainerID.valueOf(
           random.nextInt() & Integer.MAX_VALUE));
       Assert.fail();
     } catch (ContainerNotFoundException ex) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
index f2e4968..1e9d830 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
@@ -33,6 +33,7 @@
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.server
@@ -51,7 +52,7 @@
 public class TestUnknownContainerReport {
 
   private NodeManager nodeManager;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
 
@@ -59,7 +60,7 @@
   public void setup() throws IOException {
     final ConfigurationSource conf = new OzoneConfiguration();
     this.nodeManager = new MockNodeManager(true, 10);
-    this.containerManager = Mockito.mock(ContainerManager.class);
+    this.containerManager = Mockito.mock(ContainerManagerV2.class);
     this.containerStateManager = new ContainerStateManager(conf);
     this.publisher = Mockito.mock(EventPublisher.class);
 
@@ -103,7 +104,7 @@
    */
   private void sendContainerReport(OzoneConfiguration conf) {
     ContainerReportHandler reportHandler = new ContainerReportHandler(
-        nodeManager, containerManager, conf);
+        nodeManager, containerManager, SCMContext.emptyContext(), conf);
 
     ContainerInfo container = getContainer(LifeCycleState.CLOSED);
     Iterator<DatanodeDetails> nodeIterator = nodeManager
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
index a2426d1..fab2c68 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
@@ -39,7 +39,7 @@
   @Test
   public void testInsert() throws SCMException {
     ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
-    ContainerID id = new ContainerID(42);
+    ContainerID id = ContainerID.valueOf(42);
     containerAttribute.insert(1, id);
     Assert.assertEquals(1,
         containerAttribute.getCollection(1).size());
@@ -47,7 +47,7 @@
 
     // Insert again and verify that the new ContainerId is inserted.
     ContainerID newId =
-        new ContainerID(42);
+        ContainerID.valueOf(42);
     containerAttribute.insert(1, newId);
     Assert.assertEquals(1,
         containerAttribute.getCollection(1).size());
@@ -59,7 +59,7 @@
     ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
 
     for (int x = 1; x < 42; x++) {
-      containerAttribute.insert(1, new ContainerID(x));
+      containerAttribute.insert(1, ContainerID.valueOf(x));
     }
     Assert.assertTrue(containerAttribute.hasKey(1));
     for (int x = 1; x < 42; x++) {
@@ -67,7 +67,7 @@
     }
 
     Assert.assertFalse(containerAttribute.hasContainerID(1,
-        new ContainerID(42)));
+        ContainerID.valueOf(42)));
   }
 
   @Test
@@ -76,7 +76,7 @@
     ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
     for (String k : keyslist) {
       for (int x = 1; x < 101; x++) {
-        containerAttribute.insert(k, new ContainerID(x));
+        containerAttribute.insert(k, ContainerID.valueOf(x));
       }
     }
     for (String k : keyslist) {
@@ -96,16 +96,16 @@
 
     for (String k : keyslist) {
       for (int x = 1; x < 101; x++) {
-        containerAttribute.insert(k, new ContainerID(x));
+        containerAttribute.insert(k, ContainerID.valueOf(x));
       }
     }
     for (int x = 1; x < 101; x += 2) {
-      containerAttribute.remove("Key1", new ContainerID(x));
+      containerAttribute.remove("Key1", ContainerID.valueOf(x));
     }
 
     for (int x = 1; x < 101; x += 2) {
       Assert.assertFalse(containerAttribute.hasContainerID("Key1",
-          new ContainerID(x)));
+          ContainerID.valueOf(x)));
     }
 
     Assert.assertEquals(100,
@@ -125,7 +125,7 @@
     String key3 = "Key3";
 
     ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
-    ContainerID id = new ContainerID(42);
+    ContainerID id = ContainerID.valueOf(42);
 
     containerAttribute.insert(key1, id);
     Assert.assertTrue(containerAttribute.hasContainerID(key1, id));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
index 3c7c952..69b038c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
@@ -404,7 +404,7 @@
       DatanodeDetails dn = r.getDatanodeDetails();
 
       ContainerReplica replace = new ContainerReplica.ContainerReplicaBuilder()
-          .setContainerID(new ContainerID(1))
+          .setContainerID(ContainerID.valueOf(1))
           .setContainerState(OPEN)
           .setDatanodeDetails(dn)
           .setOriginNodeId(dn.getUuid())
@@ -446,7 +446,7 @@
       DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
       dn.setPersistedOpState(s);
       replica.add(new ContainerReplica.ContainerReplicaBuilder()
-          .setContainerID(new ContainerID(1))
+          .setContainerID(ContainerID.valueOf(1))
           .setContainerState(CLOSED)
           .setDatanodeDetails(dn)
           .setOriginNodeId(dn.getUuid())
@@ -458,7 +458,7 @@
 
   private ContainerInfo createContainer(HddsProtos.LifeCycleState state) {
     return new ContainerInfo.Builder()
-        .setContainerID(new ContainerID(1).getId())
+        .setContainerID(ContainerID.valueOf(1).getId())
         .setState(state)
         .build();
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
new file mode 100644
index 0000000..681dec8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol;
+import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManagerV2;
+import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.apache.ratis.server.RaftServer;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+import java.math.BigInteger;
+import java.security.KeyPair;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * Tests on {@link org.apache.hadoop.hdds.scm.metadata.Replicate}.
+ */
+public class TestReplicationAnnotation {
+  private SCMHAInvocationHandler scmhaInvocationHandler;
+  private SCMRatisServer scmRatisServer;
+
+  @Before
+  public void setup() {
+    scmRatisServer = new SCMRatisServer() {
+      @Override
+      public void start() throws IOException {
+      }
+
+      @Override
+      public void registerStateMachineHandler(
+          SCMRatisProtocol.RequestType handlerType, Object handler) {
+      }
+
+      @Override
+      public SCMRatisResponse submitRequest(SCMRatisRequest request)
+          throws IOException, ExecutionException, InterruptedException {
+        throw new IOException("submitRequest is called.");
+      }
+
+      @Override
+      public void stop() throws IOException {
+      }
+
+      @Override
+      public RaftServer.Division getDivision() {
+        return null;
+      }
+
+      @Override
+      public List<String> getRatisRoles() {
+        return null;
+      }
+
+      @Override
+      public NotLeaderException triggerNotLeaderException() {
+        return null;
+      }
+
+      @Override
+      public boolean addSCM(AddSCMRequest request)
+          throws IOException {
+        return false;
+      }
+
+      @Override
+      public SCMStateMachine getSCMStateMachine() {
+        return null;
+      }
+    };
+  }
+
+  @Test
+  public void testReplicateAnnotationBasic() throws Throwable {
+
+    scmhaInvocationHandler = new SCMHAInvocationHandler(
+        RequestType.CONTAINER, null, scmRatisServer);
+
+    ContainerStateManagerV2 proxy =
+        (ContainerStateManagerV2) Proxy.newProxyInstance(
+        SCMHAInvocationHandler.class.getClassLoader(),
+        new Class<?>[]{ContainerStateManagerV2.class}, scmhaInvocationHandler);
+
+    try {
+      proxy.addContainer(HddsProtos.ContainerInfoProto.getDefaultInstance());
+      Assert.fail("Cannot reach here: should have seen a IOException");
+    } catch (IOException ignore) {
+      Assert.assertNotNull(ignore.getMessage() != null);
+      Assert.assertEquals("submitRequest is called.",
+          ignore.getMessage());
+    }
+
+    scmhaInvocationHandler = new SCMHAInvocationHandler(
+        RequestType.CERT_STORE, null, scmRatisServer);
+
+    CertificateStore certificateStore =
+        (CertificateStore) Proxy.newProxyInstance(
+        SCMHAInvocationHandler.class.getClassLoader(),
+        new Class<?>[]{CertificateStore.class}, scmhaInvocationHandler);
+
+    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
+    try {
+      certificateStore.storeValidCertificate(BigInteger.valueOf(100L),
+          KeyStoreTestUtil.generateCertificate("CN=Test", keyPair, 30,
+          "SHA256withRSA"), HddsProtos.NodeType.SCM);
+      Assert.fail("Cannot reach here: should have seen a IOException");
+    } catch (IOException ignore) {
+      Assert.assertNotNull(ignore.getMessage() != null);
+      Assert.assertEquals("submitRequest is called.",
+          ignore.getMessage());
+    }
+
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
new file mode 100644
index 0000000..c809880
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+/**
+ * Test for SCMContext.
+ */
+public class TestSCMContext {
+  @Test
+  public void testRaftOperations() {
+    // start as follower
+    SCMContext scmContext =
+        new SCMContext.Builder().setLeader(false).setTerm(0).build();
+
+    assertFalse(scmContext.isLeader());
+
+    // become leader
+    scmContext.updateLeaderAndTerm(true, 10);
+    assertTrue(scmContext.isLeader());
+    try {
+      assertEquals(scmContext.getTermOfLeader(), 10);
+    } catch (NotLeaderException e) {
+      fail("Should not throw nle.");
+    }
+
+    // step down
+    scmContext.updateLeaderAndTerm(false, 0);
+    assertFalse(scmContext.isLeader());
+  }
+
+  @Test
+  public void testSafeModeOperations() {
+    // in safe mode
+    SCMContext scmContext = new SCMContext.Builder()
+        .setIsInSafeMode(true)
+        .setIsPreCheckComplete(false)
+        .build();
+
+    assertTrue(scmContext.isInSafeMode());
+    assertFalse(scmContext.isPreCheckComplete());
+
+    // in safe mode, pass preCheck
+    scmContext.updateSafeModeStatus(new SafeModeStatus(true, true));
+    assertTrue(scmContext.isInSafeMode());
+    assertTrue(scmContext.isPreCheckComplete());
+
+    // out of safe mode
+    scmContext.updateSafeModeStatus(new SafeModeStatus(false, true));
+    assertFalse(scmContext.isInSafeMode());
+    assertTrue(scmContext.isPreCheckComplete());
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
new file mode 100644
index 0000000..f913a70
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.ha.ConfUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_DIRS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
+
+public class TestSCMHAConfiguration {
+  private OzoneConfiguration conf;
+
+  @Before
+  public void setup() {
+    conf = new OzoneConfiguration();
+  }
+
+  @Test
+  public void testSetStorageDir() {
+    SCMHAConfiguration scmhaConfiguration = conf.getObject(
+        SCMHAConfiguration.class);
+    scmhaConfiguration.setRatisStorageDir("scm-ratis");
+    conf.setFromObject(scmhaConfiguration);
+
+    scmhaConfiguration = conf.getObject(
+        SCMHAConfiguration.class);
+    Assert.assertEquals("scm-ratis", scmhaConfiguration.getRatisStorageDir());
+  }
+
+  @Test
+  public void testRaftLogPurgeEnabled() {
+    SCMHAConfiguration scmhaConfiguration = conf.getObject(
+        SCMHAConfiguration.class);
+    scmhaConfiguration.setRaftLogPurgeEnabled(true);
+    conf.setFromObject(scmhaConfiguration);
+
+    scmhaConfiguration = conf.getObject(
+        SCMHAConfiguration.class);
+    Assert.assertEquals(true, scmhaConfiguration.getRaftLogPurgeEnabled());
+  }
+
+
+  @Test
+  public void testSCMHAConfig() throws Exception {
+    String scmServiceId = "scmserviceId";
+    conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
+
+    String[] nodes = new String[] {"scm1", "scm2", "scm3"};
+    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId,
+        "scm1,scm2,scm3");
+    conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1");
+
+    int port = 9880;
+    int i = 1;
+    for (String nodeId : nodes) {
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:"+port++);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+          scmServiceId, nodeId), port);
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY,
+          scmServiceId, nodeId), "172.28.9.1");
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:"+port++);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
+          scmServiceId, nodeId), port);
+      conf.set(ConfUtils.addKeySuffixes(
+          OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY, scmServiceId, nodeId),
+          "172.28.9.1");
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:"+port++);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY,
+          scmServiceId, nodeId), port);
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY,
+          scmServiceId, nodeId), "172.28.9.1");
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:"+port++);
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+          scmServiceId, nodeId), port);
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_BIND_HOST_KEY,
+          scmServiceId, nodeId), "172.28.9.1");
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost:"+port++);
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_BIND_HOST_KEY,
+          scmServiceId, nodeId), "172.28.9.1");
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DB_DIRS,
+          scmServiceId, nodeId), "/var/scm-metadata"+ i++);
+
+      conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY,
+          scmServiceId, nodeId), "localhost");
+
+      conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_RATIS_PORT_KEY,
+          scmServiceId, nodeId), port++);
+    }
+
+
+    SCMHANodeDetails.loadSCMHAConfig(conf);
+
+    port = 9880;
+
+    // Validate configs.
+    Assert.assertEquals("localhost:"+port++,
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+        scmServiceId, "scm1")));
+    Assert.assertEquals(port,
+        conf.getInt(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
+        scmServiceId, "scm1"), 9999));
+    Assert.assertEquals("172.28.9.1",
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY,
+            scmServiceId, "scm1")));
+
+
+    Assert.assertEquals("localhost:"+port++,
+        conf.get(ConfUtils.addKeySuffixes(
+            OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, scmServiceId, "scm1")));
+    Assert.assertEquals(port, conf.getInt(ConfUtils.addKeySuffixes(
+        OZONE_SCM_SECURITY_SERVICE_PORT_KEY, scmServiceId, "scm1"), 9999));
+    Assert.assertEquals("172.28.9.1",
+        conf.get(ConfUtils.addKeySuffixes(
+            OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY, scmServiceId, "scm1")));
+
+
+    Assert.assertEquals("localhost:"+port++,
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY,
+            scmServiceId, "scm1")));
+    Assert.assertEquals(port,
+        conf.getInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY,
+            scmServiceId, "scm1"), 9999));
+    Assert.assertEquals("172.28.9.1", conf.get(
+        ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY, scmServiceId,
+        "scm1")));
+
+    Assert.assertEquals("localhost:"+port++,
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY,
+            scmServiceId, "scm1")));
+    Assert.assertEquals(port,
+        conf.getInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY,
+            scmServiceId, "scm1"), 9999));
+    Assert.assertEquals("172.28.9.1", conf.get(
+        ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_BIND_HOST_KEY, scmServiceId,
+        "scm1")));
+
+
+    Assert.assertEquals("localhost:"+port++,
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY,
+        scmServiceId, "scm1")));
+    Assert.assertEquals("172.28.9.1",
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_BIND_HOST_KEY,
+        scmServiceId, "scm1")));
+
+    Assert.assertEquals("localhost", conf.get(ConfUtils.addKeySuffixes(
+        OZONE_SCM_ADDRESS_KEY, scmServiceId,
+        "scm1")));
+
+    Assert.assertEquals("/var/scm-metadata1",
+        conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_DB_DIRS, scmServiceId,
+        "scm1")));
+
+    Assert.assertEquals(port++,
+        conf.getInt(ConfUtils.addKeySuffixes(OZONE_SCM_RATIS_PORT_KEY,
+        scmServiceId, "scm1"), 9999));
+
+
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java
new file mode 100644
index 0000000..f5913aa
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.ratis.protocol.Message;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType.PIPELINE;
+
+/**
+ * Test for SCMRatisRequest.
+ */
+public class TestSCMRatisRequest {
+
+  @Test
+  public void testEncodeAndDecodeSuccess() throws Exception {
+    PipelineID pipelineID = PipelineID.randomId();
+    Object[] args = new Object[] {pipelineID.getProtobuf()};
+    String operation = "test";
+    SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, operation,
+        new Class[]{pipelineID.getProtobuf().getClass()}, args);
+    Assert.assertEquals(operation,
+        SCMRatisRequest.decode(request.encode()).getOperation());
+    Assert.assertEquals(args[0],
+        SCMRatisRequest.decode(request.encode()).getArguments()[0]);
+  }
+
+  @Test(expected = InvalidProtocolBufferException.class)
+  public void testEncodeWithNonProto() throws Exception{
+    PipelineID pipelineID = PipelineID.randomId();
+    // Non proto args
+    Object[] args = new Object[] {pipelineID};
+    SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, "test",
+        new Class[]{pipelineID.getClass()}, args);
+    // Should throw exception there.
+    request.encode();
+  }
+
+  @Test(expected = InvalidProtocolBufferException.class)
+  public void testDecodeWithNonProto() throws Exception {
+    // Non proto message
+    Message message = Message.valueOf("randomMessage");
+    // Should throw exception there.
+    SCMRatisRequest.decode(message);
+  }
+
+  @Test
+  public void testEncodeAndDecodeWithList() throws Exception {
+    List<HddsProtos.PipelineID> pids = new ArrayList<>();
+    pids.add(PipelineID.randomId().getProtobuf());
+    pids.add(PipelineID.randomId().getProtobuf());
+    pids.add(PipelineID.randomId().getProtobuf());
+    Object[] args = new Object[] {pids};
+    String operation = "test";
+    SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, operation,
+        new Class[]{pids.getClass()}, args);
+    Assert.assertEquals(operation,
+        SCMRatisRequest.decode(request.encode()).getOperation());
+    Assert.assertEquals(args[0],
+        SCMRatisRequest.decode(request.encode()).getArguments()[0]);
+  }
+
+  @Test
+  public void testEncodeAndDecodeOfLong() throws Exception {
+    final Long value = 10L;
+    String operation = "test";
+    SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, operation,
+        new Class[]{value.getClass()}, value);
+    Assert.assertEquals(operation,
+        SCMRatisRequest.decode(request.encode()).getOperation());
+    Assert.assertEquals(value,
+        SCMRatisRequest.decode(request.encode()).getArguments()[0]);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java
new file mode 100644
index 0000000..7ecbf2a
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftGroupMemberId;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.exceptions.LeaderNotReadyException;
+import org.apache.ratis.protocol.exceptions.RaftException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test for SCMRatisResponse.
+ */
+public class TestSCMRatisResponse {
+  private RaftGroupMemberId raftId;
+
+  @Before
+  public void init() {
+    raftId = RaftGroupMemberId.valueOf(
+        RaftPeerId.valueOf("peer"), RaftGroupId.randomId());
+  }
+
+  @Test
+  public void testEncodeAndDecodeSuccess() throws Exception {
+    RaftClientReply reply = RaftClientReply.newBuilder()
+        .setClientId(ClientId.randomId())
+        .setServerId(raftId)
+        .setGroupId(RaftGroupId.emptyGroupId())
+        .setCallId(1L)
+        .setSuccess(true)
+        .setMessage(Message.EMPTY)
+        .setException(null)
+        .setLogIndex(1L)
+        .build();
+    SCMRatisResponse response = SCMRatisResponse.decode(reply);
+    Assert.assertTrue(response.isSuccess());
+    Assert.assertEquals(Message.EMPTY,
+        SCMRatisResponse.encode(response.getResult()));
+  }
+
+  @Test
+  public void testDecodeOperationFailureWithException() throws Exception {
+    RaftClientReply reply = RaftClientReply.newBuilder()
+        .setClientId(ClientId.randomId())
+        .setServerId(raftId)
+        .setGroupId(RaftGroupId.emptyGroupId())
+        .setCallId(1L)
+        .setSuccess(false)
+        .setMessage(Message.EMPTY)
+        .setException(new LeaderNotReadyException(raftId))
+        .setLogIndex(1L)
+        .build();
+    SCMRatisResponse response = SCMRatisResponse.decode(reply);
+    Assert.assertFalse(response.isSuccess());
+    Assert.assertTrue(response.getException() instanceof RaftException);
+    Assert.assertNull(response.getResult());
+  }
+
+  @Test(expected =  InvalidProtocolBufferException.class)
+  public void testEncodeFailureWithNonProto() throws Exception {
+    // Non proto input
+    Message message = Message.valueOf("test");
+    // Should fail with exception.
+    SCMRatisResponse.encode(message);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMServiceManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMServiceManager.java
new file mode 100644
index 0000000..786a893
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMServiceManager.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestSCMServiceManager {
+  @Test
+  public void testServiceRunWhenLeader() {
+    SCMContext scmContext = new SCMContext.Builder()
+        .setLeader(false)
+        .setTerm(1)
+        .setIsInSafeMode(true)
+        .setIsPreCheckComplete(false)
+        .build();
+
+    // A service runs when it is leader.
+    SCMService serviceRunWhenLeader = new SCMService() {
+      private ServiceStatus serviceStatus = ServiceStatus.PAUSING;
+
+      @Override
+      public void notifyStatusChanged() {
+        if (scmContext.isLeader()) {
+          serviceStatus = ServiceStatus.RUNNING;
+        } else {
+          serviceStatus = ServiceStatus.PAUSING;
+        }
+      }
+
+      @Override
+      public boolean shouldRun() {
+        return serviceStatus == ServiceStatus.RUNNING;
+      }
+
+      @Override
+      public String getServiceName() {
+        return "serviceRunWhenLeader";
+      }
+
+      @Override
+      public void start() {
+      }
+
+      @Override
+      public void stop() {
+      }
+    };
+
+    SCMServiceManager serviceManager = new SCMServiceManager();
+    serviceManager.register(serviceRunWhenLeader);
+
+    // PAUSING at the beginning.
+    assertFalse(serviceRunWhenLeader.shouldRun());
+
+    // PAUSING when out of safe mode.
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(false, true));
+    serviceManager.notifyStatusChanged();
+    assertFalse(serviceRunWhenLeader.shouldRun());
+
+    // RUNNING when becoming leader.
+    scmContext.updateLeaderAndTerm(true, 2);
+    serviceManager.notifyStatusChanged();
+    assertTrue(serviceRunWhenLeader.shouldRun());
+
+    // RUNNING when in safe mode.
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, false));
+    serviceManager.notifyStatusChanged();
+    assertTrue(serviceRunWhenLeader.shouldRun());
+
+    // PAUSING when stepping down.
+    scmContext.updateLeaderAndTerm(false, 3);
+    serviceManager.notifyStatusChanged();
+    assertFalse(serviceRunWhenLeader.shouldRun());
+  }
+
+  @Test
+  public void setServiceRunWhenLeaderAndOutOfSafeMode() {
+    SCMContext scmContext = new SCMContext.Builder()
+        .setLeader(false)
+        .setTerm(1)
+        .setIsInSafeMode(true)
+        .setIsPreCheckComplete(false)
+        .build();
+
+    // A service runs when it is leader and out of safe mode.
+    SCMService serviceRunWhenLeaderAndOutOfSafeMode = new SCMService() {
+      private ServiceStatus serviceStatus = ServiceStatus.PAUSING;
+
+      @Override
+      public void notifyStatusChanged() {
+        if (scmContext.isLeader() && !scmContext.isInSafeMode()) {
+          serviceStatus = ServiceStatus.RUNNING;
+        } else {
+          serviceStatus = ServiceStatus.PAUSING;
+        }
+      }
+
+      @Override
+      public boolean shouldRun() {
+        return serviceStatus == ServiceStatus.RUNNING;
+      }
+
+      @Override
+      public String getServiceName() {
+        return "serviceRunWhenLeaderAndOutOfSafeMode";
+      }
+
+      @Override
+      public void start() {
+      }
+
+      @Override
+      public void stop() {
+      }
+    };
+
+    SCMServiceManager serviceManager = new SCMServiceManager();
+    serviceManager.register(serviceRunWhenLeaderAndOutOfSafeMode);
+
+    // PAUSING at the beginning.
+    assertFalse(serviceRunWhenLeaderAndOutOfSafeMode.shouldRun());
+
+    // PAUSING when out of safe mode.
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(false, true));
+    serviceManager.notifyStatusChanged();
+    assertFalse(serviceRunWhenLeaderAndOutOfSafeMode.shouldRun());
+
+    // RUNNING when becoming leader.
+    scmContext.updateLeaderAndTerm(true, 2);
+    serviceManager.notifyStatusChanged();
+    assertTrue(serviceRunWhenLeaderAndOutOfSafeMode.shouldRun());
+
+    // PAUSING when in safe mode.
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, false));
+    serviceManager.notifyStatusChanged();
+    assertFalse(serviceRunWhenLeaderAndOutOfSafeMode.shouldRun());
+
+    // PAUSING when stepping down.
+    scmContext.updateLeaderAndTerm(false, 3);
+    serviceManager.notifyStatusChanged();
+    assertFalse(serviceRunWhenLeaderAndOutOfSafeMode.shouldRun());
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java
new file mode 100644
index 0000000..17a7f91
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * <p>http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * <p>Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.ha;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SEQUENCE_ID_BATCH_SIZE;
+
+public class TestSequenceIDGenerator {
+  @Test
+  public void testSequenceIDGenUponNonRatis() throws Exception {
+    OzoneConfiguration conf = SCMTestUtils.getConf();
+    SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf);
+    scmMetadataStore.start(conf);
+
+    SCMHAManager scmHAManager = MockSCMHAManager
+        .getInstance(true, new SCMDBTransactionBufferImpl());
+
+    SequenceIdGenerator sequenceIdGen = new SequenceIdGenerator(
+        conf, scmHAManager, scmMetadataStore.getSequenceIdTable());
+
+    // the first batch is [1, 1000]
+    Assert.assertEquals(1L, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(2L, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(3L, sequenceIdGen.getNextId("someKey"));
+
+    Assert.assertEquals(1L, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(2L, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(3L, sequenceIdGen.getNextId("otherKey"));
+
+    // default batchSize is 1000, the next batch is [1001, 2000]
+    sequenceIdGen.invalidateBatch();
+    Assert.assertEquals(1001, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(1002, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(1003, sequenceIdGen.getNextId("someKey"));
+
+    Assert.assertEquals(1001, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(1002, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(1003, sequenceIdGen.getNextId("otherKey"));
+
+    // default batchSize is 1000, the next batch is [2001, 3000]
+    sequenceIdGen.invalidateBatch();
+    Assert.assertEquals(2001, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(2002, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(2003, sequenceIdGen.getNextId("someKey"));
+
+    Assert.assertEquals(2001, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(2002, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(2003, sequenceIdGen.getNextId("otherKey"));
+  }
+
+  @Test
+  public void testSequenceIDGenUponRatis() throws Exception {
+    OzoneConfiguration conf = SCMTestUtils.getConf();
+    
+    // change batchSize to 100
+    conf.setInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE, 100);
+
+    SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf);
+    scmMetadataStore.start(conf);
+
+    SCMHAManager scmHAManager = MockSCMHAManager.getInstance(true);
+
+    SequenceIdGenerator sequenceIdGen = new SequenceIdGenerator(
+        conf, scmHAManager, scmMetadataStore.getSequenceIdTable());
+
+    // the first batch is [1, 100]
+    Assert.assertEquals(1L, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(2L, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(3L, sequenceIdGen.getNextId("someKey"));
+
+    Assert.assertEquals(1L, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(2L, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(3L, sequenceIdGen.getNextId("otherKey"));
+
+    // the next batch is [101, 200]
+    sequenceIdGen.invalidateBatch();
+    Assert.assertEquals(101, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(102, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(103, sequenceIdGen.getNextId("someKey"));
+
+    Assert.assertEquals(101, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(102, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(103, sequenceIdGen.getNextId("otherKey"));
+
+    // the next batch is [201, 300]
+    sequenceIdGen.invalidateBatch();
+    Assert.assertEquals(201, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(202, sequenceIdGen.getNextId("someKey"));
+    Assert.assertEquals(203, sequenceIdGen.getNextId("someKey"));
+
+    Assert.assertEquals(201, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(202, sequenceIdGen.getNextId("otherKey"));
+    Assert.assertEquals(203, sequenceIdGen.getNextId("otherKey"));
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestBigIntegerCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestBigIntegerCodec.java
new file mode 100644
index 0000000..9123382
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestBigIntegerCodec.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.math.BigInteger;
+
+/**
+ * Class to test BigIntegerCodec serialize and deserialize.
+ */
+public class TestBigIntegerCodec {
+
+  @Test
+  public void testCodec() {
+    BigIntegerCodec bigIntegerCodec = new BigIntegerCodec();
+
+    BigInteger bigInteger = BigInteger.valueOf(100);
+    ByteString byteString = bigIntegerCodec.serialize(bigInteger);
+
+    BigInteger actual =
+        (BigInteger) bigIntegerCodec.deserialize(BigInteger.class, byteString);
+    Assert.assertEquals(bigInteger, actual);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java
new file mode 100644
index 0000000..ffa874d
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/io/TestX509CertificateCodec.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.ha.io;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.security.KeyPair;
+import java.security.cert.X509Certificate;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Class to test X509CertificateCodec serialize and deserialize.
+ */
+public class TestX509CertificateCodec {
+
+  @Test
+  public void codec() throws Exception {
+    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
+    X509Certificate x509Certificate =
+        KeyStoreTestUtil.generateCertificate("CN=Test", keyPair, 30,
+        "SHA256withRSA");
+
+    X509CertificateCodec x509CertificateCodec = new X509CertificateCodec();
+    ByteString byteString = x509CertificateCodec.serialize(x509Certificate);
+
+    X509Certificate actual = (X509Certificate)
+        x509CertificateCodec.deserialize(X509Certificate.class, byteString);
+
+    Assert.assertEquals(x509Certificate, actual);
+
+  }
+
+  @Test(expected = InvalidProtocolBufferException.class)
+  public void testCodecError() throws Exception {
+
+    X509CertificateCodec x509CertificateCodec = new X509CertificateCodec();
+    ByteString byteString = ByteString.copyFrom("dummy".getBytes(UTF_8));
+
+    x509CertificateCodec.deserialize(X509Certificate.class, byteString);
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 37a18b6..eb76e9f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -36,10 +36,13 @@
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -106,7 +109,7 @@
     Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
 
     SCMNodeManager nodeManager = new SCMNodeManager(config,
-        storageConfig, eventQueue, null);
+        storageConfig, eventQueue, null, SCMContext.emptyContext());
     return nodeManager;
   }
 
@@ -115,8 +118,15 @@
     EventQueue eventQueue = new EventQueue();
 
     PipelineManager pipelineManager =
-        new SCMPipelineManager(config, scmNodeManager,
-            scmMetadataStore.getPipelineTable(), eventQueue);
+        PipelineManagerV2Impl.newPipelineManager(
+            config,
+            MockSCMHAManager.getInstance(true),
+            scmNodeManager,
+            scmMetadataStore.getPipelineTable(),
+            eventQueue,
+            SCMContext.emptyContext(),
+            new SCMServiceManager());
+
     return new SCMContainerManager(config, scmMetadataStore.getContainerTable(),
         scmMetadataStore.getStore(),
         pipelineManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index d779967..af762d6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -425,7 +425,7 @@
   private Set<ContainerID> generateContainers(int count) {
     Set<ContainerID> containers = new HashSet<>();
     for (int i=0; i<count; i++) {
-      containers.add(new ContainerID(i));
+      containers.add(ContainerID.valueOf(i));
     }
     return containers;
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index d7c7d2e..bd2959d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -42,11 +42,10 @@
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -54,7 +53,7 @@
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .NodeReportFromDatanode;
@@ -79,9 +78,9 @@
 
   private StorageContainerManager scm;
   private SCMNodeManager nodeManager;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private NodeReportHandler nodeReportHandler;
-  private SCMPipelineManager pipelineManager;
+  private PipelineManagerV2Impl pipelineManager;
   private DeadNodeHandler deadNodeHandler;
   private EventPublisher publisher;
   private EventQueue eventQueue;
@@ -97,10 +96,10 @@
         TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
     eventQueue = new EventQueue();
-    scm = HddsTestUtils.getScm(conf);
+    scm = TestUtils.getScm(conf);
     nodeManager = (SCMNodeManager) scm.getScmNodeManager();
     pipelineManager =
-        (SCMPipelineManager)scm.getPipelineManager();
+        (PipelineManagerV2Impl)scm.getPipelineManager();
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(nodeManager,
             pipelineManager.getStateManager(), conf);
@@ -165,7 +164,6 @@
 
     LambdaTestUtils.await(120000, 1000,
         () -> {
-          pipelineManager.triggerPipelineCreation();
           System.out.println(pipelineManager.getPipelines(RATIS, THREE).size());
           System.out.println(pipelineManager.getPipelines(RATIS, ONE).size());
           return pipelineManager.getPipelines(RATIS, THREE).size() > 3;
@@ -201,15 +199,16 @@
     deadNodeHandler.onMessage(datanode1, publisher);
 
     Set<ContainerReplica> container1Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container1.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container1.getContainerID()));
     Assert.assertEquals(2, container1Replicas.size());
 
     Set<ContainerReplica> container2Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container2.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container2.getContainerID()));
     Assert.assertEquals(2, container2Replicas.size());
 
     Set<ContainerReplica> container3Replicas = containerManager
-            .getContainerReplicas(new ContainerID(container3.getContainerID()));
+            .getContainerReplicas(
+                ContainerID.valueOf(container3.getContainerID()));
     Assert.assertEquals(1, container3Replicas.size());
 
     // Now set the node to anything other than IN_MAINTENANCE and the relevant
@@ -219,30 +218,30 @@
     deadNodeHandler.onMessage(datanode1, publisher);
 
     container1Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container1.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container1.getContainerID()));
     Assert.assertEquals(1, container1Replicas.size());
     Assert.assertEquals(datanode2,
         container1Replicas.iterator().next().getDatanodeDetails());
 
     container2Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container2.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container2.getContainerID()));
     Assert.assertEquals(1, container2Replicas.size());
     Assert.assertEquals(datanode2,
         container2Replicas.iterator().next().getDatanodeDetails());
 
     container3Replicas = containerManager
-        .getContainerReplicas(new ContainerID(container3.getContainerID()));
+        .getContainerReplicas(ContainerID.valueOf(container3.getContainerID()));
     Assert.assertEquals(1, container3Replicas.size());
     Assert.assertEquals(datanode3,
         container3Replicas.iterator().next().getDatanodeDetails());
   }
 
-  private void registerReplicas(ContainerManager contManager,
+  private void registerReplicas(ContainerManagerV2 contManager,
       ContainerInfo container, DatanodeDetails... datanodes)
       throws ContainerNotFoundException {
     for (DatanodeDetails datanode : datanodes) {
       contManager.updateContainerReplica(
-          new ContainerID(container.getContainerID()),
+          ContainerID.valueOf(container.getContainerID()),
           ContainerReplica.newBuilder()
               .setContainerState(ContainerReplicaProto.State.OPEN)
               .setContainerID(container.containerID())
@@ -262,7 +261,7 @@
     nodeManager
         .setContainers(datanode,
             Arrays.stream(containers)
-                .map(container -> new ContainerID(container.getContainerID()))
+                .map(ContainerInfo::containerID)
                 .collect(Collectors.toSet()));
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 1a6a2ae..847e03e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -22,8 +22,8 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -252,7 +252,7 @@
 
   private SCMNodeManager createNodeManager(OzoneConfiguration config)
       throws IOException, AuthenticationException {
-    scm = HddsTestUtils.getScm(config);
+    scm = TestUtils.getScm(config);
     return (SCMNodeManager) scm.getScmNodeManager();
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 69b031c..78da066 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
@@ -58,8 +59,8 @@
     SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
     Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
-    nodeManager =
-        new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap);
+    nodeManager = new SCMNodeManager(conf, storageConfig,
+        new EventQueue(), clusterMap, SCMContext.emptyContext());
     nodeReportHandler = new NodeReportHandler(nodeManager);
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 3c036d7..0e2033e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -38,7 +38,6 @@
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
@@ -142,7 +141,7 @@
 
   SCMNodeManager createNodeManager(OzoneConfiguration config)
       throws IOException, AuthenticationException {
-    scm = HddsTestUtils.getScm(config);
+    scm = TestUtils.getScm(config);
     return (SCMNodeManager) scm.getScmNodeManager();
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
index a6b0339..0ebab87 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
@@ -26,7 +26,6 @@
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
@@ -68,7 +67,7 @@
     conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s");
     conf.set(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, "2s");
     final EventQueue eventQueue = new EventQueue();
-    final StorageContainerManager scm = HddsTestUtils.getScm(conf);
+    final StorageContainerManager scm = TestUtils.getScm(conf);
     nodeManager = scm.getScmNodeManager();
     final DeadNodeHandler deadNodeHandler = new DeadNodeHandler(
         nodeManager, Mockito.mock(PipelineManager.class),
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
index 6adc6c0..e9b365a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -52,7 +52,7 @@
       TreeSet<ContainerID> currentSet = new TreeSet<>();
       for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
         long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
-        currentSet.add(new ContainerID(currentCnIndex));
+        currentSet.add(ContainerID.valueOf(currentCnIndex));
       }
       testData.put(UUID.randomUUID(), currentSet);
     }
@@ -206,7 +206,7 @@
     TreeSet<ContainerID> addedContainers = new TreeSet<>();
     for (int x = 1; x <= newCount; x++) {
       long cTemp = last.getId() + x;
-      addedContainers.add(new ContainerID(cTemp));
+      addedContainers.add(ContainerID.valueOf(cTemp));
     }
 
     // This set is the super set of existing containers and new containers.
@@ -250,7 +250,7 @@
     for (int x = 0; x < removeCount; x++) {
       int startBase = (int) first.getId();
       long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
+      removedContainers.add(ContainerID.valueOf(cTemp + startBase));
     }
 
     // This set is a new set with some containers removed.
@@ -282,7 +282,7 @@
     Set<ContainerID> insertedSet = new TreeSet<>();
     // Insert nodes from 1..30
     for (int x = 1; x <= 30; x++) {
-      insertedSet.add(new ContainerID(x));
+      insertedSet.add(ContainerID.valueOf(x));
     }
 
 
@@ -296,7 +296,7 @@
     for (int x = 0; x < removeCount; x++) {
       int startBase = (int) first.getId();
       long cTemp = r.nextInt(values.size());
-      removedContainers.add(new ContainerID(cTemp + startBase));
+      removedContainers.add(ContainerID.valueOf(cTemp + startBase));
     }
 
     Set<ContainerID> newSet = new TreeSet<>(values);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
index 5954f08..b2b3a65 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNodeStateMap.java
@@ -136,9 +136,9 @@
 
     UUID dnUuid = datanodeDetails.getUuid();
 
-    nodeStateMap.addContainer(dnUuid, new ContainerID(1L));
-    nodeStateMap.addContainer(dnUuid, new ContainerID(2L));
-    nodeStateMap.addContainer(dnUuid, new ContainerID(3L));
+    nodeStateMap.addContainer(dnUuid, ContainerID.valueOf(1L));
+    nodeStateMap.addContainer(dnUuid, ContainerID.valueOf(2L));
+    nodeStateMap.addContainer(dnUuid, ContainerID.valueOf(3L));
 
     CountDownLatch elementRemoved = new CountDownLatch(1);
     CountDownLatch loopStarted = new CountDownLatch(1);
@@ -146,7 +146,7 @@
     new Thread(() -> {
       try {
         loopStarted.await();
-        nodeStateMap.removeContainer(dnUuid, new ContainerID(1L));
+        nodeStateMap.removeContainer(dnUuid, ContainerID.valueOf(1L));
         elementRemoved.countDown();
       } catch (Exception e) {
         e.printStackTrace();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
new file mode 100644
index 0000000..d03aa55
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.utils.db.Table;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Mock PipelineManager implementation for testing.
+ */
+public final class MockPipelineManager implements PipelineManager {
+
+  private PipelineStateManager stateManager;
+
+  public static PipelineManager getInstance() {
+    return new MockPipelineManager();
+  }
+
+  private MockPipelineManager() {
+    this.stateManager = new PipelineStateManager();
+  }
+
+  @Override
+  public Pipeline createPipeline(final ReplicationType type,
+                                 final ReplicationFactor factor)
+      throws IOException {
+    final List<DatanodeDetails> nodes = Stream.generate(
+        MockDatanodeDetails::randomDatanodeDetails)
+        .limit(factor.getNumber()).collect(Collectors.toList());
+    final Pipeline pipeline = Pipeline.newBuilder()
+        .setId(PipelineID.randomId())
+        .setType(type)
+        .setFactor(factor)
+        .setNodes(nodes)
+        .setState(Pipeline.PipelineState.OPEN)
+        .build();
+    stateManager.addPipeline(pipeline);
+    return pipeline;
+  }
+
+  @Override
+  public Pipeline createPipeline(final ReplicationType type,
+                                 final ReplicationFactor factor,
+                                 final List<DatanodeDetails> nodes) {
+    return Pipeline.newBuilder()
+        .setId(PipelineID.randomId())
+        .setType(type)
+        .setFactor(factor)
+        .setNodes(nodes)
+        .setState(Pipeline.PipelineState.OPEN)
+        .build();
+  }
+
+  @Override
+  public Pipeline getPipeline(final PipelineID pipelineID)
+      throws PipelineNotFoundException {
+    return stateManager.getPipeline(pipelineID);
+  }
+
+  @Override
+  public boolean containsPipeline(final PipelineID pipelineID) {
+    try {
+      stateManager.getPipeline(pipelineID);
+      return true;
+    } catch (PipelineNotFoundException e) {
+      return false;
+    }
+  }
+
+  @Override
+  public List<Pipeline> getPipelines() {
+    return stateManager.getPipelines();
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(final ReplicationType type) {
+    return stateManager.getPipelines(type);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(final ReplicationType type,
+                                     final ReplicationFactor factor) {
+    return stateManager.getPipelines(type, factor);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(final ReplicationType type,
+                                     final Pipeline.PipelineState state) {
+    return stateManager.getPipelines(type, state);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(final ReplicationType type,
+                                     final ReplicationFactor factor,
+                                     final Pipeline.PipelineState state) {
+    return stateManager.getPipelines(type, factor, state);
+  }
+
+  @Override
+  public List<Pipeline> getPipelines(final ReplicationType type,
+      final ReplicationFactor factor, final Pipeline.PipelineState state,
+      final Collection<DatanodeDetails> excludeDns,
+      final Collection<PipelineID> excludePipelines) {
+    return stateManager.getPipelines(type, factor, state,
+        excludeDns, excludePipelines);
+  }
+
+  @Override
+  public void addContainerToPipeline(final PipelineID pipelineID,
+                                     final ContainerID containerID)
+      throws IOException {
+    stateManager.addContainerToPipeline(pipelineID, containerID);
+  }
+
+  @Override
+  public void removeContainerFromPipeline(final PipelineID pipelineID,
+                                          final ContainerID containerID)
+      throws IOException {
+    stateManager.removeContainerFromPipeline(pipelineID, containerID);
+  }
+
+  @Override
+  public NavigableSet<ContainerID> getContainersInPipeline(
+      final PipelineID pipelineID) throws IOException {
+    return stateManager.getContainers(pipelineID);
+  }
+
+  @Override
+  public int getNumberOfContainers(final PipelineID pipelineID)
+      throws IOException {
+    return getContainersInPipeline(pipelineID).size();
+  }
+
+  @Override
+  public void openPipeline(final PipelineID pipelineId)
+      throws IOException {
+    stateManager.openPipeline(pipelineId);
+  }
+
+  @Override
+  public void closePipeline(final Pipeline pipeline, final boolean onTimeout)
+      throws IOException {
+    stateManager.finalizePipeline(pipeline.getId());
+  }
+
+  @Override
+  public void scrubPipeline(final ReplicationType type,
+                            final ReplicationFactor factor)
+      throws IOException {
+
+  }
+
+  @Override
+  public void startPipelineCreator() {
+
+  }
+
+  @Override
+  public void triggerPipelineCreation() {
+
+  }
+
+  @Override
+  public void incNumBlocksAllocatedMetric(final PipelineID id) {
+
+  }
+
+  @Override
+  public int minHealthyVolumeNum(Pipeline pipeline) {
+    return 0;
+  }
+
+  @Override
+  public int minPipelineLimit(Pipeline pipeline) {
+    return 0;
+  }
+
+  @Override
+  public void activatePipeline(final PipelineID pipelineID)
+      throws IOException {
+
+  }
+
+  @Override
+  public void deactivatePipeline(final PipelineID pipelineID)
+      throws IOException {
+    stateManager.deactivatePipeline(pipelineID);
+  }
+
+  @Override
+  public boolean getSafeModeStatus() {
+    return false;
+  }
+
+  @Override
+  public void reinitialize(Table<PipelineID, Pipeline> pipelineStore)
+      throws IOException {
+
+  }
+
+  @Override
+  public void close() throws IOException {
+
+  }
+
+  @Override
+  public Map<String, Integer> getPipelineInfo() {
+    return null;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index f9fb150..0e34ae5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -34,32 +35,28 @@
 public class MockRatisPipelineProvider extends RatisPipelineProvider {
 
   private boolean autoOpenPipeline;
-  private  boolean isHealthy;
 
-  public MockRatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, ConfigurationSource conf,
-      EventPublisher eventPublisher, boolean autoOpen) {
-    super(nodeManager, stateManager, conf, eventPublisher);
+  public MockRatisPipelineProvider(
+      NodeManager nodeManager, StateManager stateManager,
+      ConfigurationSource conf, EventPublisher eventPublisher,
+      boolean autoOpen) {
+    super(nodeManager, stateManager,
+        conf, eventPublisher, SCMContext.emptyContext());
     autoOpenPipeline = autoOpen;
   }
 
   public MockRatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager,
+      StateManager stateManager,
       ConfigurationSource conf) {
-    super(nodeManager, stateManager, conf, new EventQueue());
+    super(nodeManager, stateManager,
+        conf, new EventQueue(), SCMContext.emptyContext());
   }
 
-  public MockRatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager,
-      ConfigurationSource conf, boolean isHealthy) {
-    super(nodeManager, stateManager, conf, new EventQueue());
-    this.isHealthy = isHealthy;
-  }
-
-  public MockRatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, ConfigurationSource conf,
-      EventPublisher eventPublisher) {
-    super(nodeManager, stateManager, conf, eventPublisher);
+  public MockRatisPipelineProvider(
+      NodeManager nodeManager, StateManager stateManager,
+      ConfigurationSource conf, EventPublisher eventPublisher) {
+    super(nodeManager, stateManager,
+        conf, eventPublisher, SCMContext.emptyContext());
     autoOpenPipeline = true;
   }
 
@@ -82,16 +79,18 @@
           .setFactor(factor)
           .setNodes(initialPipeline.getNodes())
           .build();
-      if (isHealthy) {
-        for (DatanodeDetails datanodeDetails : initialPipeline.getNodes()) {
-          pipeline.reportDatanode(datanodeDetails);
-        }
-        pipeline.setLeaderId(initialPipeline.getFirstNode().getUuid());
-      }
       return pipeline;
     }
   }
 
+  public static void markPipelineHealthy(Pipeline pipeline)
+      throws IOException {
+    for (DatanodeDetails datanodeDetails : pipeline.getNodes()) {
+      pipeline.reportDatanode(datanodeDetails);
+    }
+    pipeline.setLeaderId(pipeline.getFirstNode().getUuid());
+  }
+
   @Override
   public void shutdown() {
     // Do nothing.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
index 99443c3..3578718 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
@@ -22,9 +22,11 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -37,7 +39,7 @@
 
   @Test
   public void testCloseActionForMissingPipeline()
-      throws PipelineNotFoundException {
+      throws PipelineNotFoundException, NotLeaderException {
     final PipelineManager manager = Mockito.mock(PipelineManager.class);
     final EventQueue queue = Mockito.mock(EventQueue.class);
 
@@ -45,7 +47,7 @@
         .thenThrow(new PipelineNotFoundException());
 
     final PipelineActionHandler actionHandler =
-        new PipelineActionHandler(manager, null);
+        new PipelineActionHandler(manager, SCMContext.emptyContext(), null);
 
     final PipelineActionsProto actionsProto = PipelineActionsProto.newBuilder()
         .addPipelineActions(PipelineAction.newBuilder()
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 3f2ed2c..7203d9d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -81,7 +81,7 @@
     NodeManager nodeManager= new MockNodeManager(true, nodeCount);
     conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
     conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
-    PipelineStateManager stateManager = new PipelineStateManager();
+    StateManager stateManager = new PipelineStateManager();
     PipelineProvider provider = new MockRatisPipelineProvider(nodeManager,
         stateManager, conf);
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
new file mode 100644
index 0000000..06dbc0b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -0,0 +1,645 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.MockNodeManager;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SCMHADBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHADBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.ALLOCATED;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.fail;
+
+/**
+ * Tests for PipelineManagerImpl.
+ */
+public class TestPipelineManagerImpl {
+  private OzoneConfiguration conf;
+  private File testDir;
+  private DBStore dbStore;
+  private MockNodeManager nodeManager;
+  private int maxPipelineCount;
+  private SCMContext scmContext;
+  private SCMServiceManager serviceManager;
+
+  @Before
+  public void init() throws Exception {
+    conf = SCMTestUtils.getConf();
+    testDir = GenericTestUtils.getTestDir(
+        TestPipelineManagerImpl.class.getSimpleName() + UUID.randomUUID());
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+    dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
+    nodeManager = new MockNodeManager(true, 20);
+    maxPipelineCount = nodeManager.getNodeCount(
+        HddsProtos.NodeOperationalState.IN_SERVICE,
+        HddsProtos.NodeState.HEALTHY) *
+        conf.getInt(OZONE_DATANODE_PIPELINE_LIMIT,
+            OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) /
+        HddsProtos.ReplicationFactor.THREE.getNumber();
+    scmContext = SCMContext.emptyContext();
+    serviceManager = new SCMServiceManager();
+  }
+
+  @After
+  public void cleanup() throws Exception {
+    if (dbStore != null) {
+      dbStore.close();
+    }
+    FileUtil.fullyDelete(testDir);
+  }
+
+  private PipelineManagerV2Impl createPipelineManager(boolean isLeader)
+      throws IOException {
+    return PipelineManagerV2Impl.newPipelineManager(conf,
+        MockSCMHAManager.getInstance(isLeader),
+        new MockNodeManager(true, 20),
+        SCMDBDefinition.PIPELINES.getTable(dbStore),
+        new EventQueue(),
+        scmContext,
+        serviceManager);
+  }
+
+  private PipelineManagerV2Impl createPipelineManager(
+      boolean isLeader, SCMHADBTransactionBuffer buffer) throws IOException {
+    return PipelineManagerV2Impl.newPipelineManager(conf,
+        MockSCMHAManager.getInstance(isLeader, buffer),
+        new MockNodeManager(true, 20),
+        SCMDBDefinition.PIPELINES.getTable(dbStore),
+        new EventQueue(),
+        SCMContext.emptyContext(),
+        serviceManager);
+  }
+
+  @Test
+  public void testCreatePipeline() throws Exception {
+    SCMHADBTransactionBuffer buffer1 =
+        new MockSCMHADBTransactionBuffer(dbStore);
+    PipelineManagerV2Impl pipelineManager =
+        createPipelineManager(true, buffer1);
+    Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
+    Pipeline pipeline1 = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline1.getId()));
+
+    Pipeline pipeline2 = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
+    Assert.assertEquals(2, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId()));
+    buffer1.close();
+    pipelineManager.close();
+
+    SCMHADBTransactionBuffer buffer2 =
+        new MockSCMHADBTransactionBuffer(dbStore);
+    PipelineManagerV2Impl pipelineManager2 =
+        createPipelineManager(true, buffer2);
+    // Should be able to load previous pipelines.
+    Assert.assertFalse(pipelineManager2.getPipelines().isEmpty());
+    Assert.assertEquals(2, pipelineManager.getPipelines().size());
+    Pipeline pipeline3 = pipelineManager2.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    buffer2.close();
+    Assert.assertEquals(3, pipelineManager2.getPipelines().size());
+    Assert.assertTrue(pipelineManager2.containsPipeline(pipeline3.getId()));
+
+    pipelineManager2.close();
+  }
+
+  @Test
+  public void testCreatePipelineShouldFailOnFollower() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(false);
+    Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
+    try {
+      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE);
+    } catch (NotLeaderException ex) {
+      pipelineManager.close();
+      return;
+    }
+    // Should not reach here.
+    Assert.fail();
+  }
+
+  @Test
+  public void testUpdatePipelineStates() throws Exception {
+    SCMHADBTransactionBuffer buffer = new MockSCMHADBTransactionBuffer(dbStore);
+    PipelineManagerV2Impl pipelineManager =
+        createPipelineManager(true, buffer);
+    Table<PipelineID, Pipeline> pipelineStore =
+        SCMDBDefinition.PIPELINES.getTable(dbStore);
+    Pipeline pipeline = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+    Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+    buffer.flush();
+    Assert.assertEquals(ALLOCATED,
+        pipelineStore.get(pipeline.getId()).getPipelineState());
+    PipelineID pipelineID = pipeline.getId();
+
+    pipelineManager.openPipeline(pipelineID);
+    pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueOf(1));
+    Assert.assertTrue(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.OPEN).contains(pipeline));
+    buffer.flush();
+    Assert.assertTrue(pipelineStore.get(pipeline.getId()).isOpen());
+
+    pipelineManager.deactivatePipeline(pipeline.getId());
+    Assert.assertEquals(Pipeline.PipelineState.DORMANT,
+        pipelineManager.getPipeline(pipelineID).getPipelineState());
+    buffer.flush();
+    Assert.assertEquals(Pipeline.PipelineState.DORMANT,
+        pipelineStore.get(pipeline.getId()).getPipelineState());
+    Assert.assertFalse(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.OPEN).contains(pipeline));
+
+    pipelineManager.activatePipeline(pipeline.getId());
+    Assert.assertTrue(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.OPEN).contains(pipeline));
+    buffer.flush();
+    Assert.assertTrue(pipelineStore.get(pipeline.getId()).isOpen());
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testOpenPipelineShouldFailOnFollower() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+    Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+    // Change to follower
+    assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager;
+    ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false);
+    try {
+      pipelineManager.openPipeline(pipeline.getId());
+    } catch (NotLeaderException ex) {
+      pipelineManager.close();
+      return;
+    }
+    // Should not reach here.
+    Assert.fail();
+  }
+
+  @Test
+  public void testActivatePipelineShouldFailOnFollower() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+    Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+    // Change to follower
+    assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager;
+    ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false);
+    try {
+      pipelineManager.activatePipeline(pipeline.getId());
+    } catch (NotLeaderException ex) {
+      pipelineManager.close();
+      return;
+    }
+    // Should not reach here.
+    Assert.fail();
+  }
+
+  @Test
+  public void testDeactivatePipelineShouldFailOnFollower() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+    Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+    // Change to follower
+    assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager;
+    ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false);
+    try {
+      pipelineManager.deactivatePipeline(pipeline.getId());
+    } catch (NotLeaderException ex) {
+      pipelineManager.close();
+      return;
+    }
+    // Should not reach here.
+    Assert.fail();
+  }
+
+  @Test
+  public void testRemovePipeline() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    // Create a pipeline
+    Pipeline pipeline = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+    Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+
+    // Open the pipeline
+    pipelineManager.openPipeline(pipeline.getId());
+    pipelineManager
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
+    Assert.assertTrue(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.OPEN).contains(pipeline));
+
+    try {
+      pipelineManager.removePipeline(pipeline);
+      fail();
+    } catch (IOException ioe) {
+      // Should not be able to remove the OPEN pipeline.
+      Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    } catch (Exception e) {
+      Assert.fail("Should not reach here.");
+    }
+
+    // Destroy pipeline
+    pipelineManager.closePipeline(pipeline, false);
+    try {
+      pipelineManager.getPipeline(pipeline.getId());
+      fail("Pipeline should not have been retrieved");
+    } catch (PipelineNotFoundException e) {
+      // There may be pipelines created by BackgroundPipelineCreator
+      // exist in pipelineManager, just ignore them.
+    }
+
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testClosePipelineShouldFailOnFollower() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager.createPipeline(
+        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+    Assert.assertEquals(1, pipelineManager.getPipelines().size());
+    Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+    Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+    // Change to follower
+    assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager;
+    ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false);
+    try {
+      pipelineManager.closePipeline(pipeline, false);
+    } catch (NotLeaderException ex) {
+      pipelineManager.close();
+      return;
+    }
+    // Should not reach here.
+    Assert.fail();
+  }
+
+  @Test
+  public void testPipelineReport() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    SCMSafeModeManager scmSafeModeManager =
+        new SCMSafeModeManager(conf, new ArrayList<>(), pipelineManager,
+            new EventQueue(), serviceManager, scmContext);
+    Pipeline pipeline = pipelineManager
+        .createPipeline(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
+
+    // pipeline is not healthy until all dns report
+    List<DatanodeDetails> nodes = pipeline.getNodes();
+    Assert.assertFalse(
+        pipelineManager.getPipeline(pipeline.getId()).isHealthy());
+    // get pipeline report from each dn in the pipeline
+    PipelineReportHandler pipelineReportHandler =
+        new PipelineReportHandler(scmSafeModeManager, pipelineManager,
+            SCMContext.emptyContext(), conf);
+    nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline,
+        pipelineReportHandler, false));
+    sendPipelineReport(nodes.get(nodes.size() - 1), pipeline,
+        pipelineReportHandler, true);
+
+    // pipeline is healthy when all dns report
+    Assert
+        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isHealthy());
+    // pipeline should now move to open state
+    Assert
+        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen());
+
+    // close the pipeline
+    pipelineManager.closePipeline(pipeline, false);
+
+    // pipeline report for destroyed pipeline should be ignored
+    nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline,
+        pipelineReportHandler, false));
+    sendPipelineReport(nodes.get(nodes.size() - 1), pipeline,
+        pipelineReportHandler, true);
+
+    try {
+      pipelineManager.getPipeline(pipeline.getId());
+      fail("Pipeline should not have been retrieved");
+    } catch (PipelineNotFoundException e) {
+      // should reach here
+    }
+
+    // clean up
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testPipelineCreationFailedMetric() throws Exception {
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+
+    // No pipeline at start
+    MetricsRecordBuilder metrics = getMetrics(
+        SCMPipelineMetrics.class.getSimpleName());
+    long numPipelineAllocated = getLongCounter("NumPipelineAllocated",
+        metrics);
+    Assert.assertEquals(0, numPipelineAllocated);
+
+    // 3 DNs are unhealthy.
+    // Create 5 pipelines (Use up 15 Datanodes)
+
+    for (int i = 0; i < maxPipelineCount; i++) {
+      Pipeline pipeline = pipelineManager
+          .createPipeline(HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.THREE);
+      Assert.assertNotNull(pipeline);
+    }
+
+    metrics = getMetrics(
+        SCMPipelineMetrics.class.getSimpleName());
+    numPipelineAllocated = getLongCounter("NumPipelineAllocated", metrics);
+    Assert.assertEquals(maxPipelineCount, numPipelineAllocated);
+
+    long numPipelineCreateFailed = getLongCounter(
+        "NumPipelineCreationFailed", metrics);
+    Assert.assertEquals(0, numPipelineCreateFailed);
+
+    //This should fail...
+    try {
+      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE);
+      fail();
+    } catch (SCMException ioe) {
+      // pipeline creation failed this time.
+      Assert.assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE,
+          ioe.getResult());
+    }
+
+    metrics = getMetrics(
+        SCMPipelineMetrics.class.getSimpleName());
+    numPipelineAllocated = getLongCounter("NumPipelineAllocated", metrics);
+    Assert.assertEquals(maxPipelineCount, numPipelineAllocated);
+
+    numPipelineCreateFailed = getLongCounter(
+        "NumPipelineCreationFailed", metrics);
+    Assert.assertEquals(1, numPipelineCreateFailed);
+
+    // clean up
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testPipelineOpenOnlyWhenLeaderReported() throws Exception {
+    SCMHADBTransactionBuffer buffer1 =
+        new MockSCMHADBTransactionBuffer(dbStore);
+    PipelineManagerV2Impl pipelineManager =
+        createPipelineManager(true, buffer1);
+
+    Pipeline pipeline = pipelineManager
+        .createPipeline(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
+    // close manager
+    buffer1.close();
+    pipelineManager.close();
+    // new pipeline manager loads the pipelines from the db in ALLOCATED state
+    pipelineManager = createPipelineManager(true);
+    Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
+        pipelineManager.getPipeline(pipeline.getId()).getPipelineState());
+
+    SCMSafeModeManager scmSafeModeManager =
+        new SCMSafeModeManager(new OzoneConfiguration(), new ArrayList<>(),
+            pipelineManager, new EventQueue(), serviceManager, scmContext);
+    PipelineReportHandler pipelineReportHandler =
+        new PipelineReportHandler(scmSafeModeManager, pipelineManager,
+            SCMContext.emptyContext(), conf);
+
+    // Report pipelines with leaders
+    List<DatanodeDetails> nodes = pipeline.getNodes();
+    Assert.assertEquals(3, nodes.size());
+    // Send report for all but no leader
+    nodes.forEach(dn -> sendPipelineReport(dn, pipeline, pipelineReportHandler,
+        false));
+
+    Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
+        pipelineManager.getPipeline(pipeline.getId()).getPipelineState());
+
+    nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline,
+        pipelineReportHandler, false));
+    sendPipelineReport(nodes.get(nodes.size() - 1), pipeline,
+        pipelineReportHandler, true);
+
+    Assert.assertEquals(Pipeline.PipelineState.OPEN,
+        pipelineManager.getPipeline(pipeline.getId()).getPipelineState());
+
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testScrubPipeline() throws Exception {
+    // No timeout for pipeline scrubber.
+    conf.setTimeDuration(
+        OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
+        TimeUnit.MILLISECONDS);
+
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager
+        .createPipeline(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
+    // At this point, pipeline is not at OPEN stage.
+    Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
+        pipeline.getPipelineState());
+
+    // pipeline should be seen in pipelineManager as ALLOCATED.
+    Assert.assertTrue(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
+    pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS,
+        HddsProtos.ReplicationFactor.THREE);
+
+    // pipeline should be scrubbed.
+    Assert.assertFalse(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
+
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testScrubPipelineShouldFailOnFollower() throws Exception {
+    // No timeout for pipeline scrubber.
+    conf.setTimeDuration(
+        OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
+        TimeUnit.MILLISECONDS);
+
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    Pipeline pipeline = pipelineManager
+        .createPipeline(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
+    // At this point, pipeline is not at OPEN stage.
+    Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
+        pipeline.getPipelineState());
+
+    // pipeline should be seen in pipelineManager as ALLOCATED.
+    Assert.assertTrue(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE,
+            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
+
+    // Change to follower
+    assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager;
+    ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false);
+
+    try {
+      pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE);
+    } catch (NotLeaderException ex) {
+      pipelineManager.close();
+      return;
+    }
+    // Should not reach here.
+    Assert.fail();
+  }
+
+  @Test
+  public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception {
+    // No timeout for pipeline scrubber.
+    conf.setTimeDuration(
+        OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
+        TimeUnit.MILLISECONDS);
+
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, false));
+
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+    try {
+      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.THREE);
+      fail("Pipelines should not have been created");
+    } catch (IOException e) {
+      // No pipeline is created.
+      Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
+    }
+
+    // Ensure a pipeline of factor ONE can be created - no exceptions should be
+    // raised.
+    Pipeline pipeline = pipelineManager
+        .createPipeline(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE);
+    Assert.assertTrue(pipelineManager
+        .getPipelines(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE).contains(pipeline));
+
+    // Simulate safemode check exiting.
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, true));
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return pipelineManager.getPipelines().size() != 0;
+      }
+    }, 100, 10000);
+    pipelineManager.close();
+  }
+
+  @Test
+  public void testSafeModeUpdatedOnSafemodeExit() throws Exception {
+    // No timeout for pipeline scrubber.
+    conf.setTimeDuration(
+        OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
+        TimeUnit.MILLISECONDS);
+
+    PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, false));
+    Assert.assertTrue(pipelineManager.getSafeModeStatus());
+    Assert.assertFalse(pipelineManager.isPipelineCreationAllowed());
+
+    // First pass pre-check as true, but safemode still on
+    // Simulate safemode check exiting.
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(true, true));
+    Assert.assertTrue(pipelineManager.getSafeModeStatus());
+    Assert.assertTrue(pipelineManager.isPipelineCreationAllowed());
+
+    // Then also turn safemode off
+    scmContext.updateSafeModeStatus(
+        new SCMSafeModeManager.SafeModeStatus(false, true));
+    Assert.assertFalse(pipelineManager.getSafeModeStatus());
+    Assert.assertTrue(pipelineManager.isPipelineCreationAllowed());
+    pipelineManager.close();
+  }
+
+  private void sendPipelineReport(
+      DatanodeDetails dn, Pipeline pipeline,
+      PipelineReportHandler pipelineReportHandler,
+      boolean isLeader) {
+    SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode report =
+        TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId(), isLeader);
+    pipelineReportHandler.onMessage(report, new EventQueue());
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
index 6bff581..43d5398 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
@@ -37,7 +37,7 @@
  */
 public class TestPipelineStateManager {
 
-  private PipelineStateManager stateManager;
+  private StateManager stateManager;
 
   @Before
   public void init() throws Exception {
@@ -290,14 +290,14 @@
     stateManager.addPipeline(pipeline);
     pipeline = stateManager.getPipeline(pipeline.getId());
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
 
     // move pipeline to open state
     stateManager.openPipeline(pipeline.getId());
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
 
     //verify the number of containers returned
     Set<ContainerID> containerIDs =
@@ -307,7 +307,7 @@
     removePipeline(pipeline);
     try {
       stateManager.addContainerToPipeline(pipeline.getId(),
-          ContainerID.valueof(++containerID));
+          ContainerID.valueOf(++containerID));
       Assert.fail("Container should not have been added");
     } catch (IOException e) {
       // Can not add a container to removed pipeline
@@ -322,7 +322,7 @@
     // close the pipeline
     stateManager.openPipeline(pipeline.getId());
     stateManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
 
     try {
       stateManager.removePipeline(pipeline.getId());
@@ -347,26 +347,26 @@
     stateManager.openPipeline(pipeline.getId());
 
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
+        ContainerID.valueOf(containerID));
     Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size());
     stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
+        ContainerID.valueOf(containerID));
     Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
 
     // add two containers in the pipeline
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
     stateManager.addContainerToPipeline(pipeline.getId(),
-        ContainerID.valueof(++containerID));
+        ContainerID.valueOf(++containerID));
     Assert.assertEquals(2, stateManager.getContainers(pipeline.getId()).size());
 
     // move pipeline to closing state
     stateManager.finalizePipeline(pipeline.getId());
 
     stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(containerID));
+        ContainerID.valueOf(containerID));
     stateManager.removeContainerFromPipeline(pipeline.getId(),
-        ContainerID.valueof(--containerID));
+        ContainerID.valueOf(--containerID));
     Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
 
     // clean up
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 9e55287..2be7ab9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -42,6 +42,8 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.metadata.PipelineIDCodec;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
@@ -164,7 +166,7 @@
 
     // clean up
     for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+      pipelineManager.closePipeline(pipeline, false);
     }
     pipelineManager.close();
   }
@@ -186,8 +188,8 @@
             HddsProtos.ReplicationFactor.THREE);
     pipelineManager.openPipeline(pipeline.getId());
     pipelineManager
-        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
+    pipelineManager.closePipeline(pipeline, false);
     pipelineManager.close();
 
     // new pipeline manager should not be able to load removed pipelines
@@ -220,7 +222,8 @@
 
     SCMSafeModeManager scmSafeModeManager =
         new SCMSafeModeManager(conf, new ArrayList<>(), pipelineManager,
-            eventQueue);
+            eventQueue, new SCMServiceManager(),
+            SCMContext.emptyContext());
 
     // create a pipeline in allocated state with no dns yet reported
     Pipeline pipeline = pipelineManager
@@ -238,7 +241,8 @@
         pipelineManager.getPipeline(pipeline.getId()).isHealthy());
     // get pipeline report from each dn in the pipeline
     PipelineReportHandler pipelineReportHandler =
-        new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf);
+        new PipelineReportHandler(scmSafeModeManager, pipelineManager,
+            SCMContext.emptyContext(), conf);
     nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline,
         pipelineReportHandler, false, eventQueue));
     sendPipelineReport(nodes.get(nodes.size() - 1), pipeline,
@@ -252,7 +256,7 @@
         .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen());
 
     // close the pipeline
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+    pipelineManager.closePipeline(pipeline, false);
 
     // pipeline report for destroyed pipeline should be ignored
     nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline,
@@ -435,7 +439,7 @@
     final PipelineID pid = pipeline.getId();
 
     pipelineManager.openPipeline(pid);
-    pipelineManager.addContainerToPipeline(pid, ContainerID.valueof(1));
+    pipelineManager.addContainerToPipeline(pid, ContainerID.valueOf(1));
 
     Assert.assertTrue(pipelineManager
         .getPipelines(HddsProtos.ReplicationType.RATIS,
@@ -496,10 +500,13 @@
         pipelineManager.getPipeline(pipeline.getId()).getPipelineState());
 
     SCMSafeModeManager scmSafeModeManager =
-        new SCMSafeModeManager(new OzoneConfiguration(),
-            new ArrayList<>(), pipelineManager, eventQueue);
+        new SCMSafeModeManager(new OzoneConfiguration(), new ArrayList<>(),
+            pipelineManager, eventQueue,
+            new SCMServiceManager(),
+            SCMContext.emptyContext());
     PipelineReportHandler pipelineReportHandler =
-        new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf);
+        new PipelineReportHandler(scmSafeModeManager, pipelineManager,
+            SCMContext.emptyContext(), conf);
 
     // Report pipelines with leaders
     List<DatanodeDetails> nodes = pipeline.getNodes();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java
index 1f04b2a..19c778a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMStoreImplWithOldPipelineIDKeyFormat.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.scm.metadata.PipelineCodec;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
@@ -93,6 +94,11 @@
   }
 
   @Override
+  public Table<BigInteger, X509Certificate> getValidSCMCertsTable() {
+    return null;
+  }
+
+  @Override
   public Table<BigInteger, X509Certificate> getRevokedCertsTable() {
     return null;
   }
@@ -108,6 +114,11 @@
   }
 
   @Override
+  public Table<String, Long> getSequenceIdTable() {
+    return null;
+  }
+
+  @Override
   public TableIterator getAllCerts(CertificateStore.CertType certType) {
     return null;
   }
@@ -118,6 +129,11 @@
   }
 
   @Override
+  public Table<String, TransactionInfo> getTransactionInfoTable() {
+    return null;
+  }
+
+  @Override
   public BatchOperationHandler getBatchHandler() {
     return null;
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java
index 9110c92..8974f98 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager;
 import org.apache.hadoop.hdds.scm.pipeline.RatisPipelineProvider;
@@ -47,7 +48,8 @@
         mock(NodeManager.class),
         mock(PipelineStateManager.class),
         conf,
-        mock(EventPublisher.class));
+        mock(EventPublisher.class),
+        SCMContext.emptyContext());
     Assert.assertSame(
         ratisPipelineProvider.getLeaderChoosePolicy().getClass(),
         MinLeaderCountChoosePolicy.class);
@@ -63,7 +65,8 @@
         mock(NodeManager.class),
         mock(PipelineStateManager.class),
         conf,
-        mock(EventPublisher.class));
+        mock(EventPublisher.class),
+        SCMContext.emptyContext());
 
     // expecting exception
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
index e770ba9..e7dbeeb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
@@ -31,12 +31,15 @@
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
 import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -53,6 +56,8 @@
   public void testHealthyPipelineSafeModeRuleWithNoPipelines()
       throws Exception {
     EventQueue eventQueue = new EventQueue();
+    SCMServiceManager serviceManager = new SCMServiceManager();
+    SCMContext scmContext = SCMContext.emptyContext();
     List<ContainerInfo> containers =
             new ArrayList<>(HddsTestUtils.getContainerInfo(1));
 
@@ -70,15 +75,23 @@
     SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);
 
     try {
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
+      PipelineManagerV2Impl pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              config,
+              MockSCMHAManager.getInstance(true),
+              nodeManager,
+              scmMetadataStore.getPipelineTable(),
+              eventQueue,
+              scmContext,
+              serviceManager);
       PipelineProvider mockRatisProvider =
           new MockRatisPipelineProvider(nodeManager,
               pipelineManager.getStateManager(), config);
       pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
           mockRatisProvider);
       SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, eventQueue);
+          config, containers, pipelineManager, eventQueue,
+          serviceManager, scmContext);
 
       HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
           scmSafeModeManager.getHealthyPipelineSafeModeRule();
@@ -97,6 +110,8 @@
         TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
 
     EventQueue eventQueue = new EventQueue();
+    SCMServiceManager serviceManager = new SCMServiceManager();
+    SCMContext scmContext = SCMContext.emptyContext();
     List<ContainerInfo> containers =
             new ArrayList<>(HddsTestUtils.getContainerInfo(1));
 
@@ -114,13 +129,19 @@
 
     SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);
     try {
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
-      pipelineManager.allowPipelineCreation();
+      PipelineManagerV2Impl pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              config,
+              MockSCMHAManager.getInstance(true),
+              nodeManager,
+              scmMetadataStore.getPipelineTable(),
+              eventQueue,
+              scmContext,
+              serviceManager);
 
       PipelineProvider mockRatisProvider =
           new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config, true);
+              pipelineManager.getStateManager(), config);
       pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
           mockRatisProvider);
 
@@ -138,8 +159,19 @@
               HddsProtos.ReplicationFactor.THREE);
       pipelineManager.openPipeline(pipeline3.getId());
 
+      // Mark pipeline healthy
+      pipeline1 = pipelineManager.getPipeline(pipeline1.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline1);
+
+      pipeline2 = pipelineManager.getPipeline(pipeline2.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline2);
+
+      pipeline3 = pipelineManager.getPipeline(pipeline3.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline3);
+
       SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, eventQueue);
+          config, containers, pipelineManager, eventQueue,
+          serviceManager, scmContext);
 
       HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
           scmSafeModeManager.getHealthyPipelineSafeModeRule();
@@ -175,6 +207,8 @@
         TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
 
     EventQueue eventQueue = new EventQueue();
+    SCMServiceManager serviceManager = new SCMServiceManager();
+    SCMContext scmContext = SCMContext.emptyContext();
     List<ContainerInfo> containers =
             new ArrayList<>(HddsTestUtils.getContainerInfo(1));
 
@@ -193,13 +227,19 @@
 
     SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config);
     try {
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, scmMetadataStore.getPipelineTable(), eventQueue);
+      PipelineManagerV2Impl pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              config,
+              MockSCMHAManager.getInstance(true),
+              nodeManager,
+              scmMetadataStore.getPipelineTable(),
+              eventQueue,
+              scmContext,
+              serviceManager);
 
-      pipelineManager.allowPipelineCreation();
       PipelineProvider mockRatisProvider =
           new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config, true);
+              pipelineManager.getStateManager(), config);
       pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
           mockRatisProvider);
 
@@ -217,9 +257,19 @@
               HddsProtos.ReplicationFactor.THREE);
       pipelineManager.openPipeline(pipeline3.getId());
 
+      // Mark pipeline healthy
+      pipeline1 = pipelineManager.getPipeline(pipeline1.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline1);
+
+      pipeline2 = pipelineManager.getPipeline(pipeline2.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline2);
+
+      pipeline3 = pipelineManager.getPipeline(pipeline3.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline3);
 
       SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, eventQueue);
+          config, containers, pipelineManager, eventQueue,
+          serviceManager, scmContext);
 
       HealthyPipelineSafeModeRule healthyPipelineSafeModeRule =
           scmSafeModeManager.getHealthyPipelineSafeModeRule();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
index 6430247..6de81dc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
@@ -32,9 +32,17 @@
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
-import org.apache.hadoop.hdds.scm.pipeline.*;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -53,8 +61,10 @@
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
   private OneReplicaPipelineSafeModeRule rule;
-  private SCMPipelineManager pipelineManager;
+  private PipelineManagerV2Impl pipelineManager;
   private EventQueue eventQueue;
+  private SCMServiceManager serviceManager;
+  private SCMContext scmContext;
   private MockNodeManager mockNodeManager;
 
   private void setup(int nodes, int pipelineFactorThreeCount,
@@ -72,15 +82,20 @@
     mockNodeManager = new MockNodeManager(true, nodes);
 
     eventQueue = new EventQueue();
+    serviceManager = new SCMServiceManager();
+    scmContext = SCMContext.emptyContext();
 
     SCMMetadataStore scmMetadataStore =
             new SCMMetadataStoreImpl(ozoneConfiguration);
 
-    pipelineManager =
-        new SCMPipelineManager(ozoneConfiguration, mockNodeManager,
-            scmMetadataStore.getPipelineTable(),
-            eventQueue);
-    pipelineManager.allowPipelineCreation();
+    pipelineManager = PipelineManagerV2Impl.newPipelineManager(
+        ozoneConfiguration,
+        MockSCMHAManager.getInstance(true),
+        mockNodeManager,
+        scmMetadataStore.getPipelineTable(),
+        eventQueue,
+        scmContext,
+        serviceManager);
 
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(mockNodeManager,
@@ -95,7 +110,7 @@
 
     SCMSafeModeManager scmSafeModeManager =
         new SCMSafeModeManager(ozoneConfiguration, containers,
-            pipelineManager, eventQueue);
+            pipelineManager, eventQueue, serviceManager, scmContext);
 
     rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule();
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index 71aac96..9bc4ec2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -23,7 +23,6 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -37,10 +36,17 @@
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl;
-import org.apache.hadoop.hdds.scm.pipeline.*;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -53,6 +59,7 @@
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -64,6 +71,8 @@
 public class TestSCMSafeModeManager {
 
   private EventQueue queue;
+  private SCMContext scmContext;
+  private SCMServiceManager serviceManager;
   private SCMSafeModeManager scmSafeModeManager;
   private OzoneConfiguration config;
   private List<ContainerInfo> containers = Collections.emptyList();
@@ -79,6 +88,8 @@
   @Before
   public void setUp() {
     queue = new EventQueue();
+    scmContext = new SCMContext.Builder().build();
+    serviceManager = new SCMServiceManager();
     config = new OzoneConfiguration();
     config.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION,
         false);
@@ -110,7 +121,7 @@
   @Test
   public void testSafeModeStateWithNullContainers() {
     new SCMSafeModeManager(config, Collections.emptyList(),
-        null, queue);
+        null, queue, serviceManager, scmContext);
   }
 
   private void testSafeMode(int numContainers) throws Exception {
@@ -122,7 +133,7 @@
       container.setState(HddsProtos.LifeCycleState.CLOSED);
     }
     scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, null, queue);
+        config, containers, null, queue, serviceManager, scmContext);
 
     assertTrue(scmSafeModeManager.getInSafeMode());
     queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
@@ -145,59 +156,6 @@
   }
 
   @Test
-  public void testDelayedEventNotification() throws Exception {
-
-    List<SafeModeStatus> delayedSafeModeEvents = new ArrayList<>();
-    List<SafeModeStatus> safeModeEvents = new ArrayList<>();
-
-    //given
-    EventQueue eventQueue = new EventQueue();
-    eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS,
-        (safeModeStatus, publisher) -> safeModeEvents.add(safeModeStatus));
-    eventQueue.addHandler(SCMEvents.DELAYED_SAFE_MODE_STATUS,
-        (safeModeStatus, publisher) -> delayedSafeModeEvents
-            .add(safeModeStatus));
-
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
-    ozoneConfiguration
-        .setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
-            3, TimeUnit.SECONDS);
-    ozoneConfiguration
-        .setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
-
-    scmSafeModeManager = new SCMSafeModeManager(
-        ozoneConfiguration, containers, null, eventQueue);
-
-    //when
-    scmSafeModeManager.setInSafeMode(true);
-    scmSafeModeManager.setPreCheckComplete(true);
-
-    scmSafeModeManager.emitSafeModeStatus();
-    eventQueue.processAll(1000L);
-
-    //then
-    Assert.assertEquals(1, delayedSafeModeEvents.size());
-    Assert.assertEquals(1, safeModeEvents.size());
-
-    //when
-    scmSafeModeManager.setInSafeMode(false);
-    scmSafeModeManager.setPreCheckComplete(true);
-
-    scmSafeModeManager.emitSafeModeStatus();
-    eventQueue.processAll(1000L);
-
-    //then
-    Assert.assertEquals(2, safeModeEvents.size());
-    //delayed messages are not yet sent (unless JVM is paused for 3 seconds)
-    Assert.assertEquals(1, delayedSafeModeEvents.size());
-
-    //event will be triggered after 3 seconds (see previous config)
-    GenericTestUtils.waitFor(() -> delayedSafeModeEvents.size() == 2,
-        300,
-        6000);
-
-  }
-  @Test
   public void testSafeModeExitRule() throws Exception {
     containers = new ArrayList<>();
     int numContainers = 100;
@@ -208,7 +166,7 @@
       container.setState(HddsProtos.LifeCycleState.CLOSED);
     }
     scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, null, queue);
+        config, containers, null, queue, serviceManager, scmContext);
 
     long cutOff = (long) Math.ceil(numContainers * config.getDouble(
         HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT,
@@ -294,10 +252,17 @@
       OzoneConfiguration conf = createConf(100,
           0.9);
       MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager = new SCMPipelineManager(conf,
-          mockNodeManager, scmMetadataStore.getPipelineTable(), queue);
+      PipelineManager pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              conf,
+              MockSCMHAManager.getInstance(true),
+              mockNodeManager,
+              scmMetadataStore.getPipelineTable(),
+              queue,
+              scmContext,
+              serviceManager);
       scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, pipelineManager, queue);
+          conf, containers, pipelineManager, queue, serviceManager, scmContext);
       fail("testFailWithIncorrectValueForHealthyPipelinePercent");
     } catch (IllegalArgumentException ex) {
       GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
@@ -312,10 +277,17 @@
       OzoneConfiguration conf = createConf(0.9,
           200);
       MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager = new SCMPipelineManager(conf,
-          mockNodeManager, scmMetadataStore.getPipelineTable(), queue);
+      PipelineManager pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              conf,
+              MockSCMHAManager.getInstance(true),
+              mockNodeManager,
+              scmMetadataStore.getPipelineTable(),
+              queue,
+              scmContext,
+              serviceManager);
       scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, pipelineManager, queue);
+          conf, containers, pipelineManager, queue, serviceManager, scmContext);
       fail("testFailWithIncorrectValueForOneReplicaPipelinePercent");
     } catch (IllegalArgumentException ex) {
       GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
@@ -329,10 +301,17 @@
       OzoneConfiguration conf = createConf(0.9, 0.1);
       conf.setDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, -1.0);
       MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager = new SCMPipelineManager(conf,
-          mockNodeManager, scmMetadataStore.getPipelineTable(), queue);
+      PipelineManager pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              conf,
+              MockSCMHAManager.getInstance(true),
+              mockNodeManager,
+              scmMetadataStore.getPipelineTable(),
+              queue,
+              scmContext,
+              serviceManager);
       scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, pipelineManager, queue);
+          conf, containers, pipelineManager, queue, serviceManager, scmContext);
       fail("testFailWithIncorrectValueForSafeModePercent");
     } catch (IllegalArgumentException ex) {
       GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
@@ -353,28 +332,40 @@
     containers.addAll(HddsTestUtils.getContainerInfo(containerCount));
 
     MockNodeManager mockNodeManager = new MockNodeManager(true, nodeCount);
-    SCMPipelineManager pipelineManager = new SCMPipelineManager(conf,
-        mockNodeManager, scmMetadataStore.getPipelineTable(), queue);
+    PipelineManagerV2Impl pipelineManager =
+        PipelineManagerV2Impl.newPipelineManager(
+            conf,
+            MockSCMHAManager.getInstance(true),
+            mockNodeManager,
+            scmMetadataStore.getPipelineTable(),
+            queue,
+            scmContext,
+            serviceManager);
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(mockNodeManager,
-            pipelineManager.getStateManager(), config, true);
+            pipelineManager.getStateManager(), config);
     pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
         mockRatisProvider);
-    pipelineManager.allowPipelineCreation();
+    pipelineManager.getBackgroundPipelineCreator().stop();
 
-    for (int i=0; i < pipelineCount; i++) {
-      Pipeline pipeline = pipelineManager.
-              createPipeline(HddsProtos.ReplicationType.RATIS,
+    for (int i = 0; i < pipelineCount; i++) {
+      // Create pipeline
+      Pipeline pipeline = pipelineManager.createPipeline(
+          HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.THREE);
+
       pipelineManager.openPipeline(pipeline.getId());
+      // Mark pipeline healthy
+      pipeline = pipelineManager.getPipeline(pipeline.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline);
     }
 
     for (ContainerInfo container : containers) {
       container.setState(HddsProtos.LifeCycleState.CLOSED);
     }
 
-    scmSafeModeManager = new SCMSafeModeManager(conf, containers,
-        pipelineManager, queue);
+    scmSafeModeManager = new SCMSafeModeManager(
+        conf, containers, pipelineManager, queue, serviceManager, scmContext);
 
     assertTrue(scmSafeModeManager.getInSafeMode());
     testContainerThreshold(containers, 1.0);
@@ -450,7 +441,7 @@
         1000,  5000);
   }
 
-  private void firePipelineEvent(SCMPipelineManager pipelineManager,
+  private void firePipelineEvent(PipelineManager pipelineManager,
       Pipeline pipeline) throws Exception {
     pipelineManager.openPipeline(pipeline.getId());
     queue.fireEvent(SCMEvents.OPEN_PIPELINE,
@@ -480,13 +471,12 @@
 
 
   @Test
-  public void testDisableSafeMode() {
+  public void testDisableSafeMode() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration(config);
     conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false);
     PipelineManager pipelineManager = Mockito.mock(PipelineManager.class);
-    Mockito.doNothing().when(pipelineManager).startPipelineCreator();
-    scmSafeModeManager =
-        new SCMSafeModeManager(conf, containers, pipelineManager, queue);
+    scmSafeModeManager = new SCMSafeModeManager(
+        conf, containers, pipelineManager, queue, serviceManager, scmContext);
     assertFalse(scmSafeModeManager.getInSafeMode());
   }
 
@@ -518,7 +508,7 @@
     }
 
     scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, null, queue);
+        config, containers, null, queue, serviceManager, scmContext);
 
     assertTrue(scmSafeModeManager.getInSafeMode());
 
@@ -542,7 +532,7 @@
     OzoneConfiguration conf = new OzoneConfiguration(config);
     conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, numOfDns);
     scmSafeModeManager = new SCMSafeModeManager(
-        conf, containers, null, queue);
+        conf, containers, null, queue, serviceManager, scmContext);
 
     // Assert SCM is in Safe mode.
     assertTrue(scmSafeModeManager.getInSafeMode());
@@ -593,22 +583,32 @@
       config.setBoolean(
           HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
 
-      SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-          nodeManager, scmMetadataStore.getPipelineTable(), queue);
+      PipelineManagerV2Impl pipelineManager =
+          PipelineManagerV2Impl.newPipelineManager(
+              config,
+              MockSCMHAManager.getInstance(true),
+              nodeManager,
+              scmMetadataStore.getPipelineTable(),
+              queue,
+              scmContext,
+              serviceManager);
 
       PipelineProvider mockRatisProvider =
           new MockRatisPipelineProvider(nodeManager,
-              pipelineManager.getStateManager(), config, true);
+              pipelineManager.getStateManager(), config);
       pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
           mockRatisProvider);
-      pipelineManager.allowPipelineCreation();
 
       Pipeline pipeline = pipelineManager.createPipeline(
           HddsProtos.ReplicationType.RATIS,
           HddsProtos.ReplicationFactor.THREE);
 
+      pipeline = pipelineManager.getPipeline(pipeline.getId());
+      MockRatisPipelineProvider.markPipelineHealthy(pipeline);
+
       scmSafeModeManager = new SCMSafeModeManager(
-          config, containers, pipelineManager, queue);
+          config, containers, pipelineManager, queue, serviceManager,
+          scmContext);
 
       queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
           HddsTestUtils.createNodeRegistrationContainerReport(containers));
@@ -629,6 +629,7 @@
   }
 
   @Test
+  @Ignore("The test is failing, enable after fixing it")
   public void testPipelinesNotCreatedUntilPreCheckPasses()
       throws Exception {
     int numOfDns = 5;
@@ -647,20 +648,26 @@
     config.setBoolean(
         HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
 
-    SCMPipelineManager pipelineManager = new SCMPipelineManager(config,
-        nodeManager, scmMetadataStore.getPipelineTable(), queue);
-
+    PipelineManagerV2Impl pipelineManager =
+        PipelineManagerV2Impl.newPipelineManager(
+            config,
+            MockSCMHAManager.getInstance(true),
+            nodeManager,
+            scmMetadataStore.getPipelineTable(),
+            queue,
+            scmContext,
+            serviceManager);
 
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(nodeManager,
-            pipelineManager.getStateManager(), config, true);
+            pipelineManager.getStateManager(), config);
     pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
         mockRatisProvider);
 
     SafeModeEventHandler smHandler = new SafeModeEventHandler();
     queue.addHandler(SCMEvents.SAFE_MODE_STATUS, smHandler);
     scmSafeModeManager = new SCMSafeModeManager(
-        config, containers, pipelineManager, queue);
+        config, containers, pipelineManager, queue, serviceManager, scmContext);
 
     // Assert SCM is in Safe mode.
     assertTrue(scmSafeModeManager.getInSafeMode());
@@ -685,11 +692,25 @@
     Assert.assertEquals(true, smHandler.getPreCheckComplete());
     Assert.assertEquals(true, smHandler.getIsInSafeMode());
 
-    // Create a pipeline and ensure safemode is exited.
-    pipelineManager.allowPipelineCreation();
-    Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE);
+    /* There is a race condition where the background pipeline creation
+     * task creates the pipeline before the following create call.
+     * So wrapping it with try..catch.
+     */
+    Pipeline pipeline;
+    try {
+      pipeline = pipelineManager.createPipeline(
+          HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE);
+    } catch (SCMException ex) {
+      pipeline = pipelineManager.getPipelines(
+          HddsProtos.ReplicationType.RATIS,
+          HddsProtos.ReplicationFactor.THREE).get(0);
+    }
+
+    // Mark pipeline healthy
+    pipeline = pipelineManager.getPipeline(pipeline.getId());
+    MockRatisPipelineProvider.markPipelineHealthy(pipeline);
+
     firePipelineEvent(pipelineManager, pipeline);
 
     queue.processAll(5000);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 8c567e9..acd9762 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -23,6 +23,8 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB;
@@ -59,6 +61,8 @@
     File dir = GenericTestUtils.getRandomizedTestDir();
     config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
     SCMConfigurator configurator = new SCMConfigurator();
+    configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));
+    configurator.setScmContext(SCMContext.emptyContext());
     scm = TestUtils.getScm(config, configurator);
     scm.start();
     scm.exitSafeMode();
@@ -69,7 +73,7 @@
 
     }
     server = scm.getBlockProtocolServer();
-    service = new ScmBlockLocationProtocolServerSideTranslatorPB(server,
+    service = new ScmBlockLocationProtocolServerSideTranslatorPB(server, scm,
         Mockito.mock(ProtocolMessageMetrics.class));
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
index 96a4dc9..d36d20a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java
@@ -5,7 +5,7 @@
  * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
+ * with the License.  You may obtain a copy of the License at
  *
  *      http://www.apache.org/licenses/LICENSE-2.0
  *
@@ -32,6 +32,8 @@
 import org.bouncycastle.asn1.x509.CRLReason;
 import org.bouncycastle.cert.X509CertificateHolder;
 import org.junit.After;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -50,6 +52,10 @@
 import java.util.Optional;
 import java.util.Set;
 
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.DATANODE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.OM;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.SCM;
+import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore.CertType.VALID_CERTS;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
@@ -67,7 +73,7 @@
 
   private OzoneConfiguration config;
   private SCMMetadataStore scmMetadataStore;
-  private SCMCertStore scmCertStore;
+  private CertificateStore scmCertStore;
   private SecurityConfig securityConfig;
   private X509Certificate x509Certificate;
   private KeyPair keyPair;
@@ -90,7 +96,10 @@
   @Before
   public void initDbStore() throws IOException {
     scmMetadataStore = new SCMMetadataStoreImpl(config);
-    scmCertStore = new SCMCertStore(scmMetadataStore, INITIAL_SEQUENCE_ID);
+    scmCertStore = new SCMCertStore.Builder().setRatisServer(null)
+        .setCRLSequenceId(INITIAL_SEQUENCE_ID)
+        .setMetadaStore(scmMetadataStore)
+        .build();
   }
 
   @Before
@@ -116,12 +125,12 @@
   public void testRevokeCertificates() throws Exception {
 
     BigInteger serialID = x509Certificate.getSerialNumber();
-    scmCertStore.storeValidCertificate(serialID, x509Certificate);
+    scmCertStore.storeValidCertificate(serialID, x509Certificate, SCM);
     Date now = new Date();
 
     assertNotNull(
         scmCertStore.getCertificateByID(serialID,
-        CertificateStore.CertType.VALID_CERTS));
+        VALID_CERTS));
 
     X509CertificateHolder caCertificateHolder =
         new X509CertificateHolder(generateX509Cert().getEncoded());
@@ -136,7 +145,7 @@
 
     assertNull(
         scmCertStore.getCertificateByID(serialID,
-            CertificateStore.CertType.VALID_CERTS));
+            VALID_CERTS));
 
     assertNotNull(
         scmCertStore.getCertificateByID(serialID,
@@ -174,7 +183,7 @@
     List<BigInteger> newSerialIDs = new ArrayList<>();
     for (int i = 0; i<3; i++) {
       X509Certificate cert = generateX509Cert();
-      scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert);
+      scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM);
       newSerialIDs.add(cert.getSerialNumber());
     }
 
@@ -222,7 +231,7 @@
   @Test
   public void testRevokeCertificatesForFutureTime() throws Exception {
     BigInteger serialID = x509Certificate.getSerialNumber();
-    scmCertStore.storeValidCertificate(serialID, x509Certificate);
+    scmCertStore.storeValidCertificate(serialID, x509Certificate, SCM);
     Date now = new Date();
     // Set revocation time in the future
     Date revocationTime = new Date(now.getTime()+500);
@@ -242,7 +251,7 @@
 
     assertNotNull(
         scmCertStore.getCertificateByID(serialID,
-            CertificateStore.CertType.VALID_CERTS));
+            VALID_CERTS));
 
     assertNull(
         scmCertStore.getCertificateByID(serialID,
@@ -266,4 +275,39 @@
 
     return size;
   }
+
+  @Test
+  public void testGetAndListCertificates() throws Exception {
+    X509Certificate cert = generateX509Cert();
+    scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM);
+    checkListCerts(SCM, 1);
+
+    cert = generateX509Cert();
+    scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM);
+    checkListCerts(SCM, 2);
+
+    cert = generateX509Cert();
+    scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM);
+    checkListCerts(SCM, 3);
+
+    cert = generateX509Cert();
+    scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, OM);
+
+    // As for OM and DN all certs in valid certs table are returned.
+    // This test can be fixed once we have code for returning OM/DN certs.
+    checkListCerts(OM, 4);
+
+    cert = generateX509Cert();
+    scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, DATANODE);
+    checkListCerts(OM, 5);
+
+  }
+
+
+  private void checkListCerts(NodeType role, int expected) throws Exception {
+    List<X509Certificate> certificateList = scmCertStore.listCertificate(role,
+        BigInteger.valueOf(0), 10, VALID_CERTS);
+    Assert.assertEquals(expected, certificateList.size());
+  }
+
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
index 6b9ccbf..1024756 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
@@ -41,7 +41,7 @@
     config = new OzoneConfiguration();
     config.set(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
         OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT + ":0");
-    securityProtocolServer = new SCMSecurityProtocolServer(config, null);
+    securityProtocolServer = new SCMSecurityProtocolServer(config, null, null);
   }
 
   @After
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
index 4916190..09dd796 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
@@ -88,6 +88,12 @@
   }
 
   @Test
+  public void testPassingBootStrapSwitchCallsBootStrap() {
+    executeCommand("--bootstrap");
+    assertTrue(mock.bootStrapCalled);
+  }
+
+  @Test
   public void testInitSwitchAcceptsClusterIdSSwitch() {
     executeCommand("--init", "--clusterid=abcdefg");
     assertEquals("abcdefg", mock.clusterId);
@@ -100,6 +106,12 @@
   }
 
   @Test
+  public void testBootStrapSwitchWithInvalidParamDoesNotRun() {
+    executeCommand("--bootstrap", "--clusterid=abcdefg", "--invalid");
+    assertFalse(mock.bootStrapCalled);
+  }
+
+  @Test
   public void testUnSuccessfulInitThrowsException() {
     mock.throwOnInit = true;
     try {
@@ -111,6 +123,17 @@
   }
 
   @Test
+  public void testUnSuccessfulBootStrapThrowsException() {
+    mock.throwOnBootstrap = true;
+    try {
+      executeCommand("--bootstrap");
+      fail("Exception show have been thrown");
+    } catch (Exception e) {
+      assertTrue(true);
+    }
+  }
+
+  @Test
   public void testGenClusterIdRunsGenerate() {
     executeCommand("--genclusterid");
     assertTrue(mock.generateCalled);
@@ -140,8 +163,10 @@
     private boolean initStatus = true;
     private boolean throwOnStart = false;
     private boolean throwOnInit  = false;
+    private boolean throwOnBootstrap  = false;
     private boolean startCalled = false;
     private boolean initCalled = false;
+    private boolean bootStrapCalled = false;
     private boolean generateCalled = false;
     private String clusterId = null;
 
@@ -165,6 +190,16 @@
     }
 
     @Override
+    public boolean bootStrap(OzoneConfiguration conf)
+        throws IOException {
+      if (throwOnBootstrap) {
+        throw new IOException("Simulated error on init");
+      }
+      bootStrapCalled = true;
+      return initStatus;
+    }
+
+    @Override
     public String generateClusterId() {
       generateCalled = true;
       return "static-cluster-id";
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 7a71c80..0bee9b8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -186,8 +186,8 @@
 
       // Now change server scmId, so datanode scmId  will be
       // different from SCM server response scmId
-      String newScmId = UUID.randomUUID().toString();
-      scmServerImpl.setScmId(newScmId);
+      String newClusterId = UUID.randomUUID().toString();
+      scmServerImpl.setClusterId(newClusterId);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       newState = versionTask.call();
       Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN,
@@ -195,11 +195,9 @@
       List<HddsVolume> volumesList = ozoneContainer.getVolumeSet()
           .getFailedVolumesList();
       Assert.assertTrue(volumesList.size() == 1);
-      File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(),
-          scmServerImpl.getScmId());
-      Assert.assertTrue(logCapturer.getOutput().contains("expected scm " +
-          "directory " + expectedScmDir.getAbsolutePath() + " does not " +
-          "exist"));
+      Assert.assertTrue(logCapturer.getOutput()
+          .contains("org.apache.hadoop.ozone.common" +
+              ".InconsistentStorageStateException: Mismatched ClusterIDs"));
       Assert.assertTrue(ozoneContainer.getVolumeSet().getVolumesList().size()
           == 0);
       Assert.assertTrue(ozoneContainer.getVolumeSet().getFailedVolumesList()
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index a6bd744..3d49e20 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics;
@@ -62,7 +63,7 @@
     SCMStorageConfig config =
         new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage");
     nodeManager = new SCMNodeManager(source, config, publisher,
-        new NetworkTopologyImpl(source));
+        new NetworkTopologyImpl(source), SCMContext.emptyContext());
 
     registeredDatanode = DatanodeDetails.newBuilder()
         .setHostName("localhost")
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 66659c8..aab0524 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.hdds.scm.cli;
 
-import javax.net.SocketFactory;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -41,28 +40,21 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.tuple.Pair;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient;
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This class provides the client-facing APIs of container operations.
@@ -125,25 +117,13 @@
   }
 
   public static StorageContainerLocationProtocol newContainerRpcClient(
-      ConfigurationSource configSource) throws IOException {
-
-    Class<StorageContainerLocationProtocolPB> protocol =
-        StorageContainerLocationProtocolPB.class;
-    Configuration conf =
-        LegacyHadoopConfigurationSource.asHadoopConfiguration(configSource);
-    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
-    long version = RPC.getProtocolVersion(protocol);
-    InetSocketAddress scmAddress = getScmAddressForClients(configSource);
-    UserGroupInformation user = UserGroupInformation.getCurrentUser();
-    SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(conf);
-    int rpcTimeOut = Client.getRpcTimeout(conf);
-
-    StorageContainerLocationProtocolPB rpcProxy =
-        RPC.getProxy(protocol, version, scmAddress, user, conf,
-            socketFactory, rpcTimeOut);
+      ConfigurationSource configSource) {
+    SCMContainerLocationFailoverProxyProvider proxyProvider =
+        new SCMContainerLocationFailoverProxyProvider(configSource);
 
     StorageContainerLocationProtocolClientSideTranslatorPB client =
-        new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy);
+        new StorageContainerLocationProtocolClientSideTranslatorPB(
+            proxyProvider);
     return TracingUtil.createProxy(
         client, StorageContainerLocationProtocol.class, configSource);
   }
@@ -550,6 +530,11 @@
     return storageContainerLocationClient.getReplicationManagerStatus();
   }
 
+  @Override
+  public List<String> getScmRatisRoles() throws IOException {
+    return storageContainerLocationClient.getScmInfo().getRatisPeerRoles();
+  }
+
   /**
    * Get Datanode Usage information by ipaddress or uuid.
    *
@@ -580,5 +565,4 @@
       boolean mostUsed, int count) throws IOException {
     return storageContainerLocationClient.getDatanodeUsageInfo(mostUsed, count);
   }
-
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java
index 076a28a..d439681 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java
@@ -20,11 +20,13 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.cli.GenericParentCommand;
+import org.apache.hadoop.hdds.conf.ConfigurationException;
 import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
 import picocli.CommandLine;
 
 import java.io.IOException;
@@ -45,6 +47,11 @@
       description = "The destination scm (host:port)")
   private String scm;
 
+
+  @CommandLine.Option(names = {"--service-id", "-id"}, description =
+      "ServiceId of SCM HA Cluster")
+  private String scmServiceId;
+
   public ScmClient createScmClient() {
     try {
       GenericParentCommand parent = (GenericParentCommand)
@@ -62,13 +69,23 @@
     if (StringUtils.isNotEmpty(scm)) {
       conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm);
     }
-    if (!HddsUtils.getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) {
 
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY
-              + " should be set in ozone-site.xml or with the --scm option");
+    // Use the scm service Id passed from the client.
+
+    if (StringUtils.isNotEmpty(scmServiceId)) {
+      conf.set(ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID, scmServiceId);
+    } else if (StringUtils.isBlank(SCMHAUtils.getScmServiceId(conf))) {
+      // Scm service id is not passed, and scm service id is not defined in
+      // the config, assuming it should be non-HA cluster.
+      if (!HddsUtils.getHostNameFromConfigKeys(conf,
+          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) {
+
+        throw new ConfigurationException(
+            ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY
+                + " should be set in ozone-site.xml or with the --scm option");
+      }
     }
+
   }
 
   public SCMSecurityProtocol createScmSecurityClient() {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index b2a4e92..96a4c42 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -330,7 +330,7 @@
     // if the container needs to be excluded , add the container to the
     // exclusion list , otherwise add the pipeline to the exclusion list
     if (containerExclusionException) {
-      excludeList.addConatinerId(ContainerID.valueof(containerId));
+      excludeList.addConatinerId(ContainerID.valueOf(containerId));
     } else {
       excludeList.addPipeline(pipelineId);
     }
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
index 2220668..ad1f1fd 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
@@ -202,28 +202,6 @@
   }
 
   @Test
-  public void testBlockClientFailsWithMultipleScmNames() {
-    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
-    // are undefined, fail if OZONE_SCM_NAMES has multiple SCMs.
-    final String scmHost = "host123,host456";
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    thrown.expect(IllegalArgumentException.class);
-    HddsUtils.getScmAddressForBlockClients(conf);
-  }
-
-  @Test
-  public void testClientFailsWithMultipleScmNames() {
-    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, fail if OZONE_SCM_NAMES
-    // has multiple SCMs.
-    final String scmHost = "host123,host456";
-    final OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_NAMES, scmHost);
-    thrown.expect(IllegalArgumentException.class);
-    HddsUtils.getScmAddressForClients(conf);
-  }
-
-  @Test
   public void testVerifyResourceName() {
     final String validName = "my-bucket.01";
     HddsClientUtils.verifyResourceName(validName);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index c477eef..c389bec 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.conf.OMClientConfig;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
@@ -51,7 +52,6 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager;
 
-import com.google.common.base.Joiner;
 import org.apache.commons.lang3.StringUtils;
 import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
 import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
@@ -121,7 +121,7 @@
       }
       for (String nodeId : getOMNodeIds(conf, serviceId)) {
         String rpcAddr = getOmRpcAddress(conf,
-            addKeySuffixes(OZONE_OM_ADDRESS_KEY, serviceId, nodeId));
+            ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, serviceId, nodeId));
         if (rpcAddr != null) {
           result.get(serviceId).add(NetUtils.createSocketAddr(rpcAddr));
         } else {
@@ -318,49 +318,11 @@
   }
 
   /**
-   * Add non empty and non null suffix to a key.
-   */
-  private static String addSuffix(String key, String suffix) {
-    if (suffix == null || suffix.isEmpty()) {
-      return key;
-    }
-    assert !suffix.startsWith(".") :
-        "suffix '" + suffix + "' should not already have '.' prepended.";
-    return key + "." + suffix;
-  }
-
-  /**
-   * Concatenate list of suffix strings '.' separated.
-   */
-  private static String concatSuffixes(String... suffixes) {
-    if (suffixes == null) {
-      return null;
-    }
-    return Joiner.on(".").skipNulls().join(suffixes);
-  }
-
-  /**
-   * Return configuration key of format key.suffix1.suffix2...suffixN.
-   */
-  public static String addKeySuffixes(String key, String... suffixes) {
-    String keySuffix = concatSuffixes(suffixes);
-    return addSuffix(key, keySuffix);
-  }
-
-  /**
-   * Match input address to local address.
-   * Return true if it matches, false otherwsie.
-   */
-  public static boolean isAddressLocal(InetSocketAddress addr) {
-    return NetUtils.isLocalAddress(addr.getAddress());
-  }
-
-  /**
    * Get a collection of all omNodeIds for the given omServiceId.
    */
   public static Collection<String> getOMNodeIds(ConfigurationSource conf,
       String omServiceId) {
-    String key = addSuffix(OZONE_OM_NODES_KEY, omServiceId);
+    String key = ConfUtils.addSuffix(OZONE_OM_NODES_KEY, omServiceId);
     return conf.getTrimmedStringCollection(key);
   }
 
@@ -385,7 +347,7 @@
    */
   public static String getConfSuffixedWithOMNodeId(ConfigurationSource conf,
       String confKey, String omServiceID, String omNodeId) {
-    String suffixedConfKey = OmUtils.addKeySuffixes(
+    String suffixedConfKey = ConfUtils.addKeySuffixes(
         confKey, omServiceID, omNodeId);
     String confValue = conf.getTrimmed(suffixedConfKey);
     if (StringUtils.isNotEmpty(confValue)) {
@@ -404,13 +366,16 @@
   public static String getHttpAddressForOMPeerNode(ConfigurationSource conf,
       String omServiceId, String omNodeId, String omNodeHostAddr) {
     final Optional<String> bindHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTP_BIND_HOST_KEY, omServiceId, omNodeId));
+        ConfUtils.addKeySuffixes(
+            OZONE_OM_HTTP_BIND_HOST_KEY, omServiceId, omNodeId));
 
     final OptionalInt addressPort = getPortNumberFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId));
+        ConfUtils.addKeySuffixes(
+            OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId));
 
     final Optional<String> addressHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId));
+        ConfUtils.addKeySuffixes(
+            OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId));
 
     String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr));
 
@@ -427,13 +392,16 @@
   public static String getHttpsAddressForOMPeerNode(ConfigurationSource conf,
       String omServiceId, String omNodeId, String omNodeHostAddr) {
     final Optional<String> bindHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTPS_BIND_HOST_KEY, omServiceId, omNodeId));
+        ConfUtils.addKeySuffixes(
+            OZONE_OM_HTTPS_BIND_HOST_KEY, omServiceId, omNodeId));
 
     final OptionalInt addressPort = getPortNumberFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId));
+        ConfUtils.addKeySuffixes(
+            OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId));
 
     final Optional<String> addressHost = getHostNameFromConfigKeys(conf,
-        addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId));
+        ConfUtils.addKeySuffixes(
+            OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId));
 
     String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr));
 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/NodeDetails.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/NodeDetails.java
new file mode 100644
index 0000000..c73abb9
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/NodeDetails.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.ha;
+
+
+import org.apache.hadoop.net.NetUtils;
+
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+public class NodeDetails {
+  private String serviceId;
+  private String nodeId;
+  private InetSocketAddress rpcAddress;
+  private int ratisPort;
+  private String httpAddress;
+  private String httpsAddress;
+
+  /**
+   * Constructs NodeDetails object.
+   */
+  public NodeDetails(String serviceId, String nodeId,
+                        InetSocketAddress rpcAddr, int ratisPort,
+                        String httpAddress, String httpsAddress) {
+    this.serviceId = serviceId;
+    this.nodeId = nodeId;
+    this.rpcAddress = rpcAddr;
+    this.ratisPort = ratisPort;
+    this.httpAddress = httpAddress;
+    this.httpsAddress = httpsAddress;
+  }
+
+  public String getServiceId() {
+    return serviceId;
+  }
+
+  public String getNodeId() {
+    return nodeId;
+  }
+
+  public InetSocketAddress getRpcAddress() {
+    return rpcAddress;
+  }
+
+  public boolean isHostUnresolved() {
+    return rpcAddress.isUnresolved();
+  }
+
+  public InetAddress getInetAddress() {
+    return rpcAddress.getAddress();
+  }
+
+  public String getHostName() {
+    return rpcAddress.getHostName();
+  }
+
+  public String getRatisHostPortStr() {
+    StringBuilder hostPort = new StringBuilder();
+    hostPort.append(getHostName())
+        .append(":")
+        .append(ratisPort);
+    return hostPort.toString();
+  }
+
+  public String getRatisAddressPortStr() {
+    StringBuilder hostPort = new StringBuilder();
+    hostPort.append(getInetAddress().getHostAddress())
+        .append(":")
+        .append(ratisPort);
+    return hostPort.toString();
+  }
+
+
+  public int getRatisPort() {
+    return ratisPort;
+  }
+
+  public String getRpcAddressString() {
+    return NetUtils.getHostPortString(rpcAddress);
+  }
+
+  public String getHttpAddress() {
+    return httpAddress;
+  }
+
+  public String getHttpsAddress() {
+    return httpsAddress;
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
similarity index 87%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
index 4944017..d2867c4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ha/package-info.java
@@ -15,8 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
-
+package org.apache.hadoop.ozone.ha;
 /**
- * This package contains classes related to Apache Ratis for SCM.
- */
+ This package contains HA related code.
+ */
\ No newline at end of file
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
index 0d513ce..02b2c84 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException;
 import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
@@ -129,7 +130,7 @@
 
       for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) {
 
-        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+        String rpcAddrKey = ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
             serviceId, nodeId);
         String rpcAddrStr = OmUtils.getOmRpcAddress(config, rpcAddrKey);
         if (rpcAddrStr == null) {
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
index d5e5618..2158917 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
@@ -25,12 +25,12 @@
 import java.util.StringJoiner;
 
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.Before;
 
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
@@ -61,11 +61,11 @@
     StringJoiner allNodeIds = new StringJoiner(",");
     for (int i = 1; i <= numNodes; i++) {
       String nodeId = NODE_ID_BASE_STR + i;
-      config.set(OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID,
+      config.set(ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID,
           nodeId), DUMMY_NODE_ADDR);
       allNodeIds.add(nodeId);
     }
-    config.set(OmUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID),
+    config.set(ConfUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID),
         allNodeIds.toString());
     provider = new OMFailoverProxyProvider(config,
         UserGroupInformation.getCurrentUser(), OM_SERVICE_ID);
@@ -116,7 +116,7 @@
    */
   @Test
   public void testWaitTimeWithSuggestedNewNode() {
-    Collection<String> allNodeIds = config.getTrimmedStringCollection(OmUtils.
+    Collection<String> allNodeIds = config.getTrimmedStringCollection(ConfUtils.
         addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID));
     allNodeIds.remove(provider.getCurrentProxyOMNodeId());
     Assert.assertTrue("This test needs at least 2 OMs",
@@ -176,11 +176,12 @@
     StringJoiner allNodeIds = new StringJoiner(",");
     for (int i = 1; i <= numNodes; i++) {
       String nodeId = NODE_ID_BASE_STR + i;
-      ozoneConf.set(OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID,
+      ozoneConf.set(
+          ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID,
           nodeId), nodeAddrs.get(i-1));
       allNodeIds.add(nodeId);
     }
-    ozoneConf.set(OmUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID),
+    ozoneConf.set(ConfUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID),
         allNodeIds.toString());
     OMFailoverProxyProvider prov = new OMFailoverProxyProvider(ozoneConf,
         UserGroupInformation.getCurrentUser(), OM_SERVICE_ID);
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
index b9f4b60..48adc78 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
@@ -68,15 +68,35 @@
       - 9862
     hostname: om3
     command: ["ozone","om"]
-  scm:
+  scm1:
     <<: *common-config
     ports:
-      - 9876:9876
+      - 9876
     environment:
       ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
       OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
       <<: *replication
     command: ["ozone","scm"]
+  scm2:
+    <<: *common-config
+    ports:
+      - 9876
+    environment:
+      WAITFOR: scm1:9865
+      ENSURE_SCM_BOOTSTRAPPED: /data/metadata/scm/current/VERSION
+      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
+      <<: *replication
+    command: ["ozone","scm"]
+  scm3:
+    <<: *common-config
+    ports:
+      - 9876
+    environment:
+      WAITFOR: scm2:9865
+      ENSURE_SCM_BOOTSTRAPPED: /data/metadata/scm/current/VERSION
+      OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
+      <<: *replication
+    command: ["ozone","scm"]
   s3g:
     <<: *common-config
     environment:
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
index 2105718..70895a0 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
@@ -23,12 +23,15 @@
 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3
 OZONE-SITE.XML_ozone.om.ratis.enable=true
 
-OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.scm.service.ids=scmservice
+OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3
+OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1
+OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2
+OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3
+OZONE-SITE.XML_ozone.scm.ratis.enable=true
 OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.scm.container.size=1GB
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 OZONE-SITE.XML_ozone.datanode.pipeline.limit=1
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
index 5913b65..7b1f9e5 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh
@@ -20,6 +20,7 @@
 
 export SECURITY_ENABLED=false
 export OZONE_REPLICATION_FACTOR=3
+export SCM=scm1
 export OM_SERVICE_ID=omservice
 
 # shellcheck source=/dev/null
@@ -27,10 +28,10 @@
 
 start_docker_env
 
-execute_robot_test scm basic/ozone-shell-single.robot
-execute_robot_test scm basic/links.robot
-execute_robot_test scm s3
-execute_robot_test scm freon
+execute_robot_test ${SCM} basic/ozone-shell-single.robot
+execute_robot_test ${SCM} basic/links.robot
+execute_robot_test ${SCM} s3
+execute_robot_test ${SCM} freon
 
 stop_docker_env
 
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
index 31011fa..a4c8e80d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml
@@ -104,5 +104,6 @@
       KERBEROS_KEYTABS: scm HTTP testuser testuser2
       ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
       OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "${OZONE_SAFEMODE_MIN_DATANODES:-1}"
+      OZONE-SITE.XML_ozone.scm.ratis.enable: "${OZONE_SCM_RATIS_ENABLE:-false}"
       OZONE_OPTS:
     command: ["/opt/hadoop/bin/ozone","scm"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 9150b24..c482396 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -27,39 +27,42 @@
 
 : ${OZONE_BUCKET_KEY_NAME:=key1}
 
-start_docker_env
+for enable in true false; do
 
-execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME}
+  start_docker_env 3 "${enable}"
 
-execute_robot_test scm kinit.robot
+  execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME}
 
-execute_robot_test scm basic
+  execute_robot_test scm kinit.robot
 
-execute_robot_test scm security
+  execute_robot_test scm basic
 
-for scheme in ofs o3fs; do
-  for bucket in link bucket; do
-    execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot
+  execute_robot_test scm security
+
+  for scheme in ofs o3fs; do
+    for bucket in link bucket; do
+      execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot
+    done
   done
-done
 
-for bucket in link generated; do
-  execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3
-done
+  for bucket in link generated; do
+    execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} s3
+  done
 
-#expects 4 pipelines, should be run before 
-#admincli which creates STANDALONE pipeline
-execute_robot_test scm recon
+  #expects 4 pipelines, should be run before
+  #admincli which creates STANDALONE pipeline
+  execute_robot_test scm recon
 
-execute_robot_test scm admincli
-execute_robot_test scm spnego
+  execute_robot_test scm admincli
+  execute_robot_test scm spnego
 
-# test replication
-docker-compose up -d --scale datanode=2
-execute_robot_test scm -v container:1 -v count:2 replication/wait.robot
-docker-compose up -d --scale datanode=3
-execute_robot_test scm -v container:1 -v count:3 replication/wait.robot
+  # test replication
+  docker-compose up -d --scale datanode=2
+  execute_robot_test scm -v container:1 -v count:2 replication/wait.robot
+  docker-compose up -d --scale datanode=3
+  execute_robot_test scm -v container:1 -v count:3 replication/wait.robot
 
-stop_docker_env
+  stop_docker_env
 
-generate_report
+  generate_report
+done
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index c2a75b9..40ff2a2 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -28,6 +28,8 @@
   OM_HA_PARAM="--om-service-id=${OM_SERVICE_ID}"
 fi
 
+: ${SCM:=scm}
+
 ## @description create results directory, purging any prior data
 create_results_dir() {
   #delete previous results
@@ -74,9 +76,9 @@
      #This line checks the safemode status in scm
      local command="${OZONE_SAFEMODE_STATUS_COMMAND}"
      if [[ "${SECURITY_ENABLED}" == 'true' ]]; then
-         status=$(docker-compose exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true)
+         status=$(docker-compose exec -T ${SCM} bash -c "kinit -k HTTP/${SCM}@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && $command" || true)
      else
-         status=$(docker-compose exec -T scm bash -c "$command")
+         status=$(docker-compose exec -T ${SCM} bash -c "$command")
      fi
 
      echo "SECONDS: $SECONDS"
@@ -110,9 +112,9 @@
   while [[ $SECONDS -lt 120 ]]; do
     local command="ozone admin om roles --service-id '${OM_SERVICE_ID}'"
     if [[ "${SECURITY_ENABLED}" == 'true' ]]; then
-      status=$(docker-compose exec -T scm bash -c "kinit -k scm/scm@EXAMPLE.COM -t /etc/security/keytabs/scm.keytab && $command" | grep LEADER)
+      status=$(docker-compose exec -T ${SCM} bash -c "kinit -k scm/${SCM}@EXAMPLE.COM -t /etc/security/keytabs/scm.keytab && $command" | grep LEADER)
     else
-      status=$(docker-compose exec -T scm bash -c "$command" | grep LEADER)
+      status=$(docker-compose exec -T ${SCM} bash -c "$command" | grep LEADER)
     fi
     if [[ -n "${status}" ]]; then
       echo "Found OM leader for service ${OM_SERVICE_ID}: $status"
@@ -136,6 +138,8 @@
 
   create_results_dir
   export OZONE_SAFEMODE_MIN_DATANODES="${datanode_count}"
+
+  export OZONE_SCM_RATIS_ENABLE=${2:-false}
   docker-compose --no-ansi down
   if ! { docker-compose --no-ansi up -d --scale datanode="${datanode_count}" \
       && wait_for_safemode_exit \
@@ -180,6 +184,7 @@
       -v OM_SERVICE_ID:"${OM_SERVICE_ID:-om}" \
       -v OZONE_DIR:"${OZONE_DIR}" \
       -v SECURITY_ENABLED:"${SECURITY_ENABLED}" \
+      -v SCM:"${SCM}" \
       ${ARGUMENTS[@]} --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" \
       "$SMOKETEST_DIR_INSIDE/$TEST"
   local -i rc=$?
@@ -251,7 +256,7 @@
 
   while [[ $SECONDS -lt $timeout ]]; do
      set +e
-     docker-compose exec -T scm /bin/bash -c "nc -z $host $port"
+     docker-compose exec -T ${SCM} /bin/bash -c "nc -z $host $port"
      status=$?
      set -e
      if [ $status -eq 0 ] ; then
diff --git a/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh
index 95df27f..7c0732e 100755
--- a/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh
+++ b/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh
@@ -115,6 +115,11 @@
     # Improve om and scm start up options
     /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init
   fi
+elif [ -n "$ENSURE_SCM_BOOTSTRAPPED" ]; then
+  if [ ! -f "$ENSURE_SCM_BOOTSTRAPPED" ]; then
+    # Improve om and scm start up options
+    /opt/hadoop/bin/ozone scm --bootstrap
+  fi
 fi
 
 if [ -n "$ENSURE_OM_INITIALIZED" ]; then
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml
new file mode 100644
index 0000000..3832bb2
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml
@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name: ozone/scm-ha
+description: Enable HA for SCM components
+---
+- type: remove
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+    - OZONE-SITE.XML_ozone.scm.block.client.address
+- type: remove
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+    - OZONE-SITE.XML_ozone.scm.client.address
+- type: remove
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+    - OZONE-SITE.XML_ozone.scm.names
+- type: add
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+  value:
+    OZONE-SITE.XML_ozone.scm.service.ids: scmservice
+    OZONE-SITE.XML_ozone.scm.nodes.scmservice: scm0,scm1,scm2
+    OZONE-SITE.XML_ozone.scm.address.scmservice.scm0: scm-0.scm.default.svc.cluster.local
+    OZONE-SITE.XML_ozone.scm.address.scmservice.scm1: scm-1.scm.default.svc.cluster.local
+    OZONE-SITE.XML_ozone.scm.address.scmservice.scm2: scm-2.scm.default.svc.cluster.local
+    OZONE-SITE.XML_ozone.scm.ratis.enable: "true"
+    OZONE-SITE.XML_ozone.scm.primordial.node.id: scm0
+- type: add
+  trigger:
+    metadata:
+      name: scm
+  path:
+    - spec
+    - template
+    - spec
+    - initContainers
+  value:
+    - name: bootstrap
+      image: "@docker.image@"
+      args:
+        - ozone
+        - scm
+        - '--bootstrap'
+      envFrom:
+        - configMapRef:
+            name: config
+- type: replace
+  trigger:
+    metadata:
+      name: scm
+    kind: StatefulSet
+  path:
+    - spec
+    - replicas
+  value: 3
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml
new file mode 100644
index 0000000..b7aca3c
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml
@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the Li-cense.
+name: ozone/scm-ha
+description: Enable HA for SCM component
+---
+- type: remove
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+    - OZONE-SITE.XML_ozone.scm.block.client.address
+- type: remove
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+    - OZONE-SITE.XML_ozone.scm.client.address
+- type: remove
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+    - OZONE-SITE.XML_ozone.scm.names
+- type: add
+  trigger:
+    metadata:
+      name: config
+  path:
+    - data
+  value:
+    OZONE-SITE.XML_ozone.scm.service.ids: scmservice
+    OZONE-SITE.XML_ozone.scm.nodes.scmservice: scm0,scm1,scm2
+    OZONE-SITE.XML_ozone.scm.address.scmservice.scm0: scm-0.scm.default.svc.cluster.local
+    OZONE-SITE.XML_ozone.scm.address.scmservice.scm1: scm-1.scm.default.svc.cluster.local
+    OZONE-SITE.XML_ozone.scm.address.scmservice.scm2: scm-2.scm.default.svc.cluster.local
+    OZONE-SITE.XML_ozone.scm.ratis.enable: "true"
+    OZONE-SITE.XML_ozone.scm.primordial.node.id: scm0
+- type: add
+  trigger:
+    metadata:
+      name: scm
+  path:
+    - spec
+    - template
+    - spec
+    - initContainers
+  value:
+    - name: bootstrap
+      image: "@docker.image@"
+      args:
+        - ozone
+        - scm
+        - '--bootstrap'
+      envFrom:
+        - configMapRef:
+            name: config
+- type: replace
+  trigger:
+    metadata:
+      name: scm
+    kind: StatefulSet
+  path:
+    - spec
+    - replicas
+  value: 3
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/switchtoemptydir.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/switchtoemptydir.yaml
new file mode 100644
index 0000000..fff60c9
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/switchtoemptydir.yaml
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name: ozone/switchtoemptydir
+description: Replace /data volume with empty dir based ephemeral persistence
+---
+- type: Remove
+  trigger:
+    metadata:
+      labels:
+        app.kubernetes.io/component: ozone
+  path:
+    - spec
+    - template
+    - spec
+    - volumes
+    - data
+- type: Add
+  trigger:
+    metadata:
+      labels:
+        app.kubernetes.io/component: ozone
+  path:
+    - spec
+    - template
+    - spec
+    - volumes
+  value:
+    - name: data
+      emptyDir: {}
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible
new file mode 100644
index 0000000..b800f89
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/Flekszible
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+source:
+  - path: ../../definitions
+import:
+  - path: ozone
+    transformations:
+    - type: Image
+      image: "@docker.image@"
+    - type: ozone/persistence
+  - path: ozone/freon
+    destination: freon
+    transformations:
+    - type: Image
+      image: "@docker.image@"
+transformations:
+  - type: Namespace
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/LICENSE.header
new file mode 100644
index 0000000..635f0d9
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/LICENSE.header
@@ -0,0 +1,15 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml
new file mode 100644
index 0000000..5bd722d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config
+data:
+  OZONE-SITE.XML_hdds.datanode.dir: /data/storage
+  OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data
+  OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata
+  OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm
+  OZONE-SITE.XML_ozone.om.address: om-0.om
+  OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm
+  OZONE-SITE.XML_ozone.scm.names: scm-0.scm
+  OZONE-SITE.XML_hdds.scm.safemode.min.datanode: "3"
+  OZONE-SITE.XML_ozone.datanode.pipeline.limit: "1"
+  LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout
+  LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender
+  LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout
+  LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd
+    HH:mm:ss} %-5p %c{1}:%L - %m%n'
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/datanode-service.yaml
new file mode 100644
index 0000000..929e7a2
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/datanode-service.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: datanode
+spec:
+  ports:
+  - port: 9870
+    name: rpc
+  clusterIP: None
+  selector:
+    app: ozone
+    component: datanode
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/datanode-statefulset.yaml
new file mode 100644
index 0000000..a3aa528
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/datanode-statefulset.yaml
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: datanode
+  labels:
+    app.kubernetes.io/component: ozone
+spec:
+  selector:
+    matchLabels:
+      app: ozone
+      component: datanode
+  serviceName: datanode
+  replicas: 3
+  template:
+    metadata:
+      labels:
+        app: ozone
+        component: datanode
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "9882"
+        prometheus.io/path: /prom
+    spec:
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: component
+                operator: In
+                values:
+                - datanode
+            topologyKey: kubernetes.io/hostname
+      securityContext:
+        fsGroup: 1000
+      containers:
+      - name: datanode
+        image: '@docker.image@'
+        args:
+        - ozone
+        - datanode
+        envFrom:
+        - configMapRef:
+            name: config
+        volumeMounts:
+        - name: data
+          mountPath: /data
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/freon/freon-deployment.yaml
new file mode 100644
index 0000000..9c14033
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/freon/freon-deployment.yaml
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: freon
+  labels:
+    app.kubernetes.io/component: ozone
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ozone
+      component: freon
+  template:
+    metadata:
+      labels:
+        app: ozone
+        component: freon
+    spec:
+      containers:
+      - name: freon
+        image: '@docker.image@'
+        args:
+        - ozone
+        - freon
+        - rk
+        - --factor=THREE
+        - --replication-type=RATIS
+        envFrom:
+        - configMapRef:
+            name: config
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/om-service.yaml
new file mode 100644
index 0000000..617277d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/om-service.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: om
+spec:
+  ports:
+  - port: 9874
+    name: ui
+  clusterIP: None
+  selector:
+    app: ozone
+    component: om
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/om-statefulset.yaml
new file mode 100644
index 0000000..ad0b16e
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/om-statefulset.yaml
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: om
+  labels:
+    app.kubernetes.io/component: ozone
+spec:
+  selector:
+    matchLabels:
+      app: ozone
+      component: om
+  serviceName: om
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: ozone
+        component: om
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "9874"
+        prometheus.io/path: /prom
+    spec:
+      securityContext:
+        fsGroup: 1000
+      containers:
+      - name: om
+        image: '@docker.image@'
+        args:
+        - ozone
+        - om
+        env:
+        - name: WAITFOR
+          value: scm-0.scm:9876
+        - name: ENSURE_OM_INITIALIZED
+          value: /data/metadata/om/current/VERSION
+        livenessProbe:
+          tcpSocket:
+            port: 9862
+          initialDelaySeconds: 30
+        envFrom:
+        - configMapRef:
+            name: config
+        volumeMounts:
+        - name: data
+          mountPath: /data
+      volumes: []
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/s3g-service.yaml
new file mode 100644
index 0000000..dd1ca83
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/s3g-service.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: s3g
+spec:
+  ports:
+  - port: 9878
+    name: rest
+  clusterIP: None
+  selector:
+    app: ozone
+    component: s3g
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/s3g-statefulset.yaml
new file mode 100644
index 0000000..6e96fb7
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/s3g-statefulset.yaml
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: s3g
+  labels:
+    app.kubernetes.io/component: ozone
+spec:
+  selector:
+    matchLabels:
+      app: ozone
+      component: s3g
+  serviceName: s3g
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: ozone
+        component: s3g
+    spec:
+      containers:
+      - name: s3g
+        image: '@docker.image@'
+        args:
+        - ozone
+        - s3g
+        livenessProbe:
+          httpGet:
+            path: /
+            port: 9878
+          initialDelaySeconds: 30
+        envFrom:
+        - configMapRef:
+            name: config
+        volumeMounts:
+        - name: data
+          mountPath: /data
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/scm-service.yaml
new file mode 100644
index 0000000..0df15d6
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/scm-service.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: scm
+spec:
+  ports:
+  - port: 9876
+    name: ui
+  clusterIP: None
+  selector:
+    app: ozone
+    component: scm
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/scm-statefulset.yaml
new file mode 100644
index 0000000..d4d6513
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/scm-statefulset.yaml
@@ -0,0 +1,79 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: scm
+  labels:
+    app.kubernetes.io/component: ozone
+spec:
+  selector:
+    matchLabels:
+      app: ozone
+      component: scm
+  serviceName: scm
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: ozone
+        component: scm
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "9876"
+        prometheus.io/path: /prom
+    spec:
+      securityContext:
+        fsGroup: 1000
+      initContainers:
+      - name: init
+        image: '@docker.image@'
+        args:
+        - ozone
+        - scm
+        - --init
+        envFrom:
+        - configMapRef:
+            name: config
+        volumeMounts:
+        - name: data
+          mountPath: /data
+      containers:
+      - name: scm
+        image: '@docker.image@'
+        args:
+        - ozone
+        - scm
+        livenessProbe:
+          tcpSocket:
+            port: 9861
+          initialDelaySeconds: 30
+        envFrom:
+        - configMapRef:
+            name: config
+        volumeMounts:
+        - name: data
+          mountPath: /data
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 2Gi
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
index b3d1c33..bcc5fd6 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
@@ -22,6 +22,7 @@
 
 *** Variables ***
 ${CONTAINER}
+${SCM}       scm
 
 *** Keywords ***
 Create test data
@@ -40,7 +41,7 @@
                         Should contain   ${output}   OPEN
 
 List containers with explicit host
-    ${output} =         Execute          ozone admin container list --scm scm
+    ${output} =         Execute          ozone admin container list --scm ${SCM}
                         Should contain   ${output}   OPEN
 
 List containers with container state
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot
index 3a97f83..00e09fe 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot
@@ -21,6 +21,7 @@
 
 *** Variables ***
 ${PIPELINE}
+${SCM}       scm
 
 *** Test Cases ***
 Create pipeline
@@ -34,7 +35,7 @@
                         Should contain   ${output}   Factor:ONE
 
 List pipelines with explicit host
-    ${output} =         Execute          ozone admin pipeline list --scm scm
+    ${output} =         Execute          ozone admin pipeline list --scm ${SCM}
                         Should contain   ${output}   Factor:ONE
 
 Deactivate pipeline
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
index 449d82e..cd4e63f 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
@@ -19,6 +19,9 @@
 Resource            ../commonlib.robot
 Test Timeout        5 minutes
 
+*** Variables ***
+${SCM}       scm
+
 *** Test Cases ***
 Check replicationmanager
     ${output} =         Execute          ozone admin replicationmanager status
@@ -26,7 +29,7 @@
                         Should contain   ${output}   Running
 
 Check replicationmanager with explicit host
-    ${output} =         Execute          ozone admin replicationmanager status --scm scm
+    ${output} =         Execute          ozone admin replicationmanager status --scm ${SCM}
                         Should contain   ${output}   ReplicationManager
                         Should contain   ${output}   Running
 
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot
index 114d846..5515ffc 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot
@@ -19,13 +19,16 @@
 Resource            ../commonlib.robot
 Test Timeout        5 minutes
 
+*** Variables ***
+${SCM}       scm
+
 *** Test Cases ***
 Check safemode
     ${output} =         Execute          ozone admin safemode status
                         Should contain   ${output}   SCM is out of safe mode
 
 Check safemode with explicit host
-    ${output} =         Execute          ozone admin safemode status --scm scm
+    ${output} =         Execute          ozone admin safemode status --scm ${SCM}
                         Should contain   ${output}   SCM is out of safe mode
 
 Wait for safemode exit
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot
new file mode 100644
index 0000000..4d7c232
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoketest ozone cluster startup
+Library             OperatingSystem
+Library             BuiltIn
+Resource            ../commonlib.robot
+Test Timeout        5 minutes
+
+*** Variables ***
+
+*** Test Cases ***
+Run scm roles
+    ${output} =         Execute          ozone admin scm roles
+                        Should contain   ${output}  [scm:9865]
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
index b059f28..bf70837 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
@@ -20,14 +20,14 @@
 Test Timeout        5 minutes
 
 *** Variables ***
-${DATANODE_HOST}        datanode
+${SCM}          scm
 
 
 *** Test Cases ***
 
 Check webui static resources
     Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit HTTP user
-    ${result} =        Execute                curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.4.1/js/bootstrap.min.js
+    ${result} =        Execute                curl --negotiate -u : -s -I http://${SCM}:9876/static/bootstrap-3.4.1/js/bootstrap.min.js
                        Should contain         ${result}    200
 
 Start freon testing
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/links.robot b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot
index 71c046e..6efa3d7 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/links.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot
@@ -24,6 +24,7 @@
 
 *** Variables ***
 ${prefix}    generated
+${SCM}       scm
 
 *** Keywords ***
 Create volumes
@@ -41,11 +42,11 @@
     Execute             ozone sh bucket link ${source}/readable-bucket ${target}/readable-link
     Execute             ozone sh bucket link ${source}/readable-bucket ${target}/unreadable-link
     Execute             ozone sh bucket link ${source}/unreadable-bucket ${target}/link-to-unreadable-bucket
-    Execute             ozone sh volume addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}
-    Execute             ozone sh volume addacl --acl user:testuser2/scm@EXAMPLE.COM:rl ${source}
-    Execute             ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:rl ${source}/readable-bucket
-    Execute             ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}/readable-link
-    Execute             ozone sh bucket addacl --acl user:testuser2/scm@EXAMPLE.COM:r ${target}/link-to-unreadable-bucket
+    Execute             ozone sh volume addacl --acl user:testuser2/${SCM}@EXAMPLE.COM:r ${target}
+    Execute             ozone sh volume addacl --acl user:testuser2/${SCM}@EXAMPLE.COM:rl ${source}
+    Execute             ozone sh bucket addacl --acl user:testuser2/${SCM}@EXAMPLE.COM:rl ${source}/readable-bucket
+    Execute             ozone sh bucket addacl --acl user:testuser2/${SCM}@EXAMPLE.COM:r ${target}/readable-link
+    Execute             ozone sh bucket addacl --acl user:testuser2/${SCM}@EXAMPLE.COM:r ${target}/link-to-unreadable-bucket
 
 Can follow link with read access
     Execute             kdestroy
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot
index 8c9d5e5..793b553 100644
--- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot
@@ -20,6 +20,7 @@
 
 *** Variables ***
 ${prefix}    generated
+${SCM}       scm
 
 *** Keywords ***
 
@@ -96,7 +97,7 @@
     ${result} =     Execute             ozone sh volume removeacl ${protocol}${server}/${volume} -a user:superuser1:xy
     ${result} =     Execute             ozone sh volume getacl ${protocol}${server}/${volume}
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" .
-    ${result} =     Execute             ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
+    ${result} =     Execute             ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a,user:testuser/${SCM}@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
     ${result} =     Execute             ozone sh volume getacl ${protocol}${server}/${volume}
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" .
     Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" .
@@ -112,7 +113,7 @@
     ${result} =     Execute             ozone sh bucket removeacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:xy
     ${result} =     Execute             ozone sh bucket getacl ${protocol}${server}/${volume}/bb1
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
+    ${result} =     Execute             ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/${SCM}@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
     ${result} =     Execute             ozone sh bucket getacl ${protocol}${server}/${volume}/bb1
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
     Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" .
@@ -163,7 +164,7 @@
     ${result} =     Execute             ozone sh key removeacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy
     ${result} =     Execute             ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc
+    ${result} =     Execute             ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/${SCM}@EXAMPLE.COM:rwxyc
     ${result} =     Execute             ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
     Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" .
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
index 453ba51..8b8220c 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
@@ -97,7 +97,7 @@
                    Execute               ozone fs -rm ${DEEP_URL}/testFile.txt
     ${result} =    Execute               ozone fs -ls -R ${BASE_URL}/
                    Should not contain    ${result}     ${DEEP_URL}/testFile.txt
-                   Should Contain Any    ${result}     .Trash/hadoop    .Trash/testuser/scm@EXAMPLE.COM    .Trash/root
+                   Should Contain Any    ${result}     .Trash/hadoop    .Trash/testuser    .Trash/root
                    Should contain        ${result}     ${DEEP_DIR}/testFile.txt
 
 Delete recursively
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot
index 0d417fb..be50d30 100644
--- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/setup.robot
@@ -59,7 +59,7 @@
     ${result} =         Execute               ozone sh volume list
                         Should contain        ${result}               ${VOLUME}
                         Should contain        ${result}               ${VOL2}
-                        Should Match Regexp   ${result}               "admin" : "(hadoop|testuser\/scm@EXAMPLE\.COM)"
+                        Should Match Regexp   ${result}               "admin" : "(hadoop|testuser\/scm[^@]*@EXAMPLE\.COM)"
     ${result} =         Execute               ozone sh bucket list ${VOLUME}
                         Should contain        ${result}               ${BUCKET}
                         Should contain        ${result}               ${BUCKET2}
diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
index 369fa44..b587cc6 100644
--- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot
@@ -23,6 +23,7 @@
 
 *** Variables ***
 ${ENDPOINT_URL}    http://s3g:9878
+${SCM}             scm
 
 *** Keywords ***
 Setup volume names
@@ -83,7 +84,7 @@
     ${result} =     Execute             ozone sh volume removeacl ${volume3} -a user:superuser1:xy
     ${result} =     Execute             ozone sh volume getacl ${volume3}
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh volume setacl ${volume3} -al user:superuser1:rwxy,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
+    ${result} =     Execute             ozone sh volume setacl ${volume3} -al user:superuser1:rwxy,user:testuser/${SCM}@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
     ${result} =     Execute             ozone sh volume getacl ${volume3}
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
     Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\"
@@ -99,7 +100,7 @@
     ${result} =     Execute             ozone sh bucket removeacl ${volume3}/bk1 -a user:superuser1:xy
     ${result} =     Execute             ozone sh bucket getacl ${volume3}/bk1
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh bucket setacl ${volume3}/bk1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
+    ${result} =     Execute             ozone sh bucket setacl ${volume3}/bk1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/${SCM}@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT]
     ${result} =     Execute             ozone sh bucket getacl ${volume3}/bk1
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
     Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\"
@@ -114,7 +115,7 @@
     ${result} =     Execute             ozone sh key removeacl ${volume3}/bk1/key1 -a user:superuser1:xy
     ${result} =     Execute             ozone sh key getacl ${volume3}/bk1/key1
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\"
-    ${result} =     Execute             ozone sh key setacl ${volume3}/bk1/key1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc
+    ${result} =     Execute             ozone sh key setacl ${volume3}/bk1/key1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/${SCM}@EXAMPLE.COM:rwxyc
     ${result} =     Execute             ozone sh key getacl ${volume3}/bk1/key1
     Should Match Regexp                 ${result}       \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\"
     Should Match Regexp                 ${result}       \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\"
@@ -127,16 +128,16 @@
                     Should contain      ${result}    PERMISSION_DENIED
     ${result} =     Execute And Ignore Error         ozone sh key list /${volume3}/bk1      
                     Should contain      ${result}    PERMISSION_DENIED
-    ${result} =     Execute And Ignore Error         ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:xy
-                    Should contain      ${result}    PERMISSION_DENIED User testuser2/scm@EXAMPLE.COM doesn't have WRITE_ACL permission to access volume
+    ${result} =     Execute And Ignore Error         ozone sh volume addacl ${volume3} -a user:testuser2/${SCM}@EXAMPLE.COM:xy
+                    Should contain      ${result}    PERMISSION_DENIED User testuser2/${SCM}@EXAMPLE.COM doesn't have WRITE_ACL permission to access volume
     Execute         kdestroy
     Run Keyword     Kinit test user     testuser     testuser.keytab
-    Execute         ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:xyrw
+    Execute         ozone sh volume addacl ${volume3} -a user:testuser2/${SCM}@EXAMPLE.COM:xyrw
     Execute         kdestroy
     Run Keyword     Kinit test user     testuser2    testuser2.keytab
     ${result} =     Execute And Ignore Error         ozone sh bucket list /${volume3}/
-                    Should contain      ${result}    PERMISSION_DENIED User testuser2/scm@EXAMPLE.COM doesn't have LIST permission to access volume
-    Execute         ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:l
+                    Should contain      ${result}    PERMISSION_DENIED User testuser2/${SCM}@EXAMPLE.COM doesn't have LIST permission to access volume
+    Execute         ozone sh volume addacl ${volume3} -a user:testuser2/${SCM}@EXAMPLE.COM:l
     Execute         ozone sh bucket list /${volume3}/
     Execute         ozone sh volume getacl /${volume3}/
     
@@ -144,7 +145,7 @@
     Should contain      ${result}    PERMISSION_DENIED
     Execute         kdestroy
     Run Keyword     Kinit test user     testuser     testuser.keytab
-    Execute         ozone sh bucket addacl ${volume3}/bk1 -a user:testuser2/scm@EXAMPLE.COM:a
+    Execute         ozone sh bucket addacl ${volume3}/bk1 -a user:testuser2/${SCM}@EXAMPLE.COM:a
     Execute         ozone sh bucket getacl /${volume3}/bk1
     Execute         kdestroy
     Run Keyword     Kinit test user     testuser2    testuser2.keytab
diff --git a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot
index e8bd525..4a92bd7 100644
--- a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot
@@ -27,12 +27,13 @@
 ${OM_DB_CHECKPOINT_URL}      ${OM_URL}/dbCheckpoint
 ${OM_SERVICE_LIST_URL}       ${OM_URL}/serviceList
 
-${SCM_URL}          http://scm:9876
+${SCM}              scm
+${SCM_URL}          http://${SCM}:9876
 ${RECON_URL}        http://recon:9888
 
-${SCM_CONF_URL}     http://scm:9876/conf
-${SCM_JMX_URL}      http://scm:9876/jmx
-${SCM_STACKS_URL}   http://scm:9876/stacks
+${SCM_CONF_URL}     http://${SCM}:9876/conf
+${SCM_JMX_URL}      http://${SCM}:9876/jmx
+${SCM_STACKS_URL}   http://${SCM}:9876/stacks
 
 
 *** Keywords ***
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
index 2137416..1fd3b01 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
@@ -50,6 +50,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-scm</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-hadoop-dependency-test</artifactId>
       <scope>test</scope>
     </dependency>
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 13cabe0..5d9cf14 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -58,24 +58,32 @@
 
   private final int numDatanodes;
   private final int numOzoneManagers;
+  private final int numStorageContainerManagers;
 
   private final FailureManager failureManager;
 
   private final int waitForClusterToBeReadyTimeout = 120000; // 2 min
 
   private final Set<OzoneManager> failedOmSet;
+  private final Set<StorageContainerManager> failedScmSet;
   private final Set<DatanodeDetails> failedDnSet;
 
   // The service on which chaos will be unleashed.
   enum FailureService {
     DATANODE,
-    OZONE_MANAGER;
+    OZONE_MANAGER,
+    STORAGE_CONTAINER_MANAGER;
 
     public String toString() {
-      if (this == DATANODE) {
+      switch (this) {
+      case DATANODE:
         return "Datanode";
-      } else {
+      case OZONE_MANAGER:
         return "OzoneManager";
+      case STORAGE_CONTAINER_MANAGER:
+        return "StorageContainerManager";
+      default:
+        return "";
       }
     }
 
@@ -84,6 +92,8 @@
         return DATANODE;
       } else if (serviceName.equalsIgnoreCase("OzoneManager")) {
         return OZONE_MANAGER;
+      } else if (serviceName.equalsIgnoreCase("StorageContainerManager")) {
+        return STORAGE_CONTAINER_MANAGER;
       }
       throw new IllegalArgumentException("Unrecognized value for " +
           "FailureService enum: " + serviceName);
@@ -91,15 +101,17 @@
   }
 
   public MiniOzoneChaosCluster(OzoneConfiguration conf,
-      List<OzoneManager> ozoneManagers, StorageContainerManager scm,
+      List<OzoneManager> ozoneManagers, List<StorageContainerManager> scms,
       List<HddsDatanodeService> hddsDatanodes, String omServiceID,
-      Set<Class<? extends Failures>> clazzes) {
-    super(conf, ozoneManagers, scm, hddsDatanodes, omServiceID);
+      String scmServiceId, Set<Class<? extends Failures>> clazzes) {
+    super(conf, ozoneManagers, scms, hddsDatanodes, omServiceID, scmServiceId);
     this.numDatanodes = getHddsDatanodes().size();
     this.numOzoneManagers = ozoneManagers.size();
+    this.numStorageContainerManagers = scms.size();
 
     this.failedOmSet = new HashSet<>();
     this.failedDnSet = new HashSet<>();
+    this.failedScmSet = new HashSet<>();
 
     this.failureManager = new FailureManager(this, conf, clazzes);
     LOG.info("Starting MiniOzoneChaosCluster with {} OzoneManagers and {} " +
@@ -109,8 +121,9 @@
 
   void startChaos(long initialDelay, long period, TimeUnit timeUnit) {
     LOG.info("Starting Chaos with failure period:{} unit:{} numDataNodes:{} " +
-            "numOzoneManagers:{}", period, timeUnit, numDatanodes,
-        numOzoneManagers);
+            "numOzoneManagers:{} numStorageContainerManagers:{}",
+        period, timeUnit, numDatanodes,
+        numOzoneManagers, numStorageContainerManagers);
     failureManager.start(initialDelay, period, timeUnit);
   }
 
@@ -193,6 +206,20 @@
       return this;
     }
 
+    /**
+     * Sets SCM Service ID.
+     */
+    public Builder setSCMServiceID(String scmServiceID) {
+      super.setSCMServiceId(scmServiceID);
+      return this;
+    }
+
+    public Builder setNumStorageContainerManagers(int val) {
+      super.setNumOfStorageContainerManagers(val);
+      super.setNumOfActiveSCMs(val);
+      return this;
+    }
+
     public Builder addFailures(Class<? extends Failures> clazz) {
       this.clazzes.add(clazz);
       return this;
@@ -271,11 +298,16 @@
         initOMRatisConf();
       }
 
-      StorageContainerManager scm;
       List<OzoneManager> omList;
+      List<StorageContainerManager> scmList;
       try {
-        scm = createSCM();
-        scm.start();
+        if (numOfSCMs > 1) {
+          scmList = createSCMService();
+        } else {
+          StorageContainerManager scm = createSCM();
+          scm.start();
+          scmList = Arrays.asList(scm);
+        }
         if (numOfOMs > 1) {
           omList = createOMService();
         } else {
@@ -288,11 +320,11 @@
       }
 
       final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(
-          scm, null);
+          scmList, null);
 
       MiniOzoneChaosCluster cluster =
-          new MiniOzoneChaosCluster(conf, omList, scm, hddsDatanodes,
-              omServiceId, clazzes);
+          new MiniOzoneChaosCluster(conf, omList, scmList, hddsDatanodes,
+              omServiceId, scmServiceId, clazzes);
 
       if (startDataNodes) {
         cluster.startHddsDatanodes();
@@ -335,7 +367,7 @@
   }
 
   // Should the selected node be stopped or started.
-  public boolean shouldStop() {
+  public boolean shouldStopOm() {
     if (failedOmSet.size() >= numOzoneManagers/2) {
       return false;
     }
@@ -376,4 +408,45 @@
   public boolean shouldStop(DatanodeDetails dn) {
     return !failedDnSet.contains(dn);
   }
+
+  // StorageContainerManager specific
+  public static int getNumberOfScmToFail() {
+    return 1;
+  }
+
+  public Set<StorageContainerManager> scmToFail() {
+    int numNodesToFail = getNumberOfScmToFail();
+    if (failedScmSet.size() >= numStorageContainerManagers/2) {
+      return Collections.emptySet();
+    }
+
+    int numSCMs = getStorageContainerManagersList().size();
+    Set<StorageContainerManager> scms = new HashSet<>();
+    for (int i = 0; i < numNodesToFail; i++) {
+      int failedNodeIndex = FailureManager.getBoundedRandomIndex(numSCMs);
+      scms.add(getStorageContainerManager(failedNodeIndex));
+    }
+    return scms;
+  }
+
+  public void shutdownStorageContainerManager(StorageContainerManager scm) {
+    super.shutdownStorageContainerManager(scm);
+    failedScmSet.add(scm);
+  }
+
+  public void restartStorageContainerManager(StorageContainerManager scm,
+      boolean waitForScm) throws IOException, TimeoutException,
+      InterruptedException, AuthenticationException {
+    super.restartStorageContainerManager(scm, waitForScm);
+    failedScmSet.remove(scm);
+  }
+
+  // Should the selected node be stopped or started.
+  public boolean shouldStopScm() {
+    if (failedScmSet.size() >= numStorageContainerManagers/2) {
+      return false;
+    }
+    return RandomUtils.nextBoolean();
+  }
+
 }
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java
index 8c25827..6723e46 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java
@@ -31,7 +31,8 @@
     subcommands = {
         TestAllMiniChaosOzoneCluster.class,
         TestDatanodeMiniChaosOzoneCluster.class,
-        TestOzoneManagerMiniChaosOzoneCluster.class
+        TestOzoneManagerMiniChaosOzoneCluster.class,
+        TestStorageContainerManagerMiniChaosOzoneCluster.class
     },
     versionProvider = HddsVersionProvider.class,
     mixinStandardHelpOptions = true)
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java
index ea8c155..8232c40 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java
@@ -40,7 +40,7 @@
 
   @Override
   public Void call() throws Exception {
-    setNumOzoneManagers(3, true);
+    setNumManagers(3, 3, true);
 
     LoadGenerator.getClassList().forEach(
         TestMiniChaosOzoneCluster::addLoadClasses);
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java
index d3f2b2d..a44a741 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java
@@ -30,7 +30,7 @@
  * Test Datanode with Chaos.
  */
 @CommandLine.Command(
-    name = "datanode",
+    name = "dn",
     description = "run chaos cluster across Ozone Datanodes",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
index e2c059d..109eadd 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java
@@ -59,6 +59,13 @@
           " be removed in later versions.")
   private static int numOzoneManagers = 1;
 
+  @Option(names = {"-s", "--num-storage-container-manager",
+      "--numStorageContainerManagers"},
+      description = "num of storageContainerManagers." +
+          "Full name --numStorageContainerManagers will" +
+          " be removed in later versions.")
+  private static int numStorageContainerManagerss = 1;
+
   @Option(names = {"-t", "--num-threads", "--numThreads"},
       description = "num of IO threads. Full name --numThreads will be" +
           " removed in later versions.")
@@ -88,8 +95,10 @@
   private static MiniOzoneLoadGenerator loadGenerator;
 
   private static String omServiceId = null;
+  private static String scmServiceId = null;
 
   private static final String OM_SERVICE_ID = "ozoneChaosTest";
+  private static final String SCM_SERVICE_ID = "scmChaosTest";
 
   @BeforeClass
   public static void init() throws Exception {
@@ -102,6 +111,8 @@
         .setNumDatanodes(numDatanodes)
         .setNumOzoneManagers(numOzoneManagers)
         .setOMServiceID(omServiceId)
+        .setNumStorageContainerManagers(numStorageContainerManagerss)
+        .setSCMServiceID(scmServiceId)
         .setNumDataVolumes(numDataVolumes);
     failureClasses.forEach(chaosBuilder::addFailures);
 
@@ -136,12 +147,17 @@
     numDatanodes = nDns;
   }
 
-  static void setNumOzoneManagers(int nOms, boolean enableHA) {
+  static void setNumManagers(int nOms, int numScms, boolean enableHA) {
 
     if (nOms > 1 || enableHA) {
       omServiceId = OM_SERVICE_ID;
     }
     numOzoneManagers = nOms;
+
+    if (numScms > 1 || enableHA) {
+      scmServiceId = SCM_SERVICE_ID;
+    }
+    numStorageContainerManagerss = numScms;
   }
 
   /**
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java
index c8fbed3..abf4a82 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java
@@ -31,7 +31,7 @@
  * Chaos cluster for Ozone Manager.
  */
 @CommandLine.Command(
-    name = "ozonemanager",
+    name = "om",
     description = "run chaos cluster across Ozone Managers",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
@@ -40,7 +40,7 @@
 
   @Override
   public Void call() throws Exception {
-    setNumOzoneManagers(3, true);
+    setNumManagers(3, 1, true);
     setNumDatanodes(3);
 
     addLoadClasses(AgedDirLoadGenerator.class);
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerMiniChaosOzoneCluster.java
new file mode 100644
index 0000000..49f6d70
--- /dev/null
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerMiniChaosOzoneCluster.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.ozone.failure.Failures;
+import org.apache.hadoop.ozone.loadgenerators.AgedDirLoadGenerator;
+import org.apache.hadoop.ozone.loadgenerators.NestedDirLoadGenerator;
+import org.apache.hadoop.ozone.loadgenerators.RandomDirLoadGenerator;
+import picocli.CommandLine;
+
+import java.util.concurrent.Callable;
+
+/**
+ * Chaos cluster for Storage Container Manager.
+ */
+@CommandLine.Command(
+    name = "scm",
+    description = "run chaos cluster across Storage Container Managers",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class)
+public class TestStorageContainerManagerMiniChaosOzoneCluster extends
+    TestMiniChaosOzoneCluster implements Callable<Void> {
+
+  @Override
+  public Void call() throws Exception {
+    setNumManagers(3, 3, true);
+    setNumDatanodes(3);
+
+    addLoadClasses(AgedDirLoadGenerator.class);
+    addLoadClasses(RandomDirLoadGenerator.class);
+    addLoadClasses(NestedDirLoadGenerator.class);
+
+    addFailureClasses(Failures.StorageContainerManagerRestartFailure.class);
+    addFailureClasses(Failures.StorageContainerManagerStartStopFailure.class);
+
+    startChaosCluster();
+    return null;
+  }
+
+}
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java
index 678720b..a5b81ba 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.failure;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneChaosCluster;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.slf4j.Logger;
@@ -50,6 +51,8 @@
     classList.add(OzoneManagerStartStopFailure.class);
     classList.add(DatanodeRestartFailure.class);
     classList.add(DatanodeStartStopFailure.class);
+    classList.add(StorageContainerManagerStartStopFailure.class);
+    classList.add(StorageContainerManagerRestartFailure.class);
 
     return classList;
   }
@@ -95,7 +98,7 @@
     @Override
     public void fail(MiniOzoneChaosCluster cluster) {
       // Get the number of OzoneManager to fail in the cluster.
-      boolean shouldStop = cluster.shouldStop();
+      boolean shouldStop = cluster.shouldStopOm();
       Set<OzoneManager> oms = cluster.omToFail();
       oms.parallelStream().forEach(om -> {
         try {
@@ -113,6 +116,66 @@
   }
 
   /**
+   * Ozone Manager failures.
+   */
+  public abstract static class ScmFailures extends Failures {
+    @Override
+    public void validateFailure(MiniOzoneChaosCluster cluster) {
+      if (cluster.getStorageContainerManagersList().size() < 3) {
+        throw new IllegalArgumentException("Not enough number of " +
+            "StorageContainerManagers to test chaos on" +
+            "StorageContainerManagers. Set number of " +
+            "StorageContainerManagers to at least 3");
+      }
+    }
+  }
+
+  /**
+   * Start/Stop Ozone Manager to induce failure.
+   */
+  public static class StorageContainerManagerStartStopFailure
+      extends ScmFailures {
+    public void fail(MiniOzoneChaosCluster cluster) {
+      // Get the number of OzoneManager to fail in the cluster.
+      boolean shouldStop = cluster.shouldStopScm();
+      Set<StorageContainerManager> scms = cluster.scmToFail();
+      scms.parallelStream().forEach(scm -> {
+        try {
+          if (shouldStop) {
+            // start another OM before failing the next one.
+            cluster.shutdownStorageContainerManager(scm);
+          } else {
+            cluster.restartStorageContainerManager(scm, true);
+          }
+        } catch (Throwable t) {
+          LOG.error("Failed to shutdown OM {}", scm, t);
+        }
+      });
+    }
+  }
+
+  /**
+   * Start/Stop Ozone Manager to induce failure.
+   */
+  public static class StorageContainerManagerRestartFailure
+      extends ScmFailures {
+    @Override
+    public void fail(MiniOzoneChaosCluster cluster) {
+      boolean failureMode = FailureManager.isFastRestart();
+      Set<StorageContainerManager> scms = cluster.scmToFail();
+      scms.parallelStream().forEach(scm -> {
+        try {
+          cluster.shutdownStorageContainerManager(scm);
+          cluster.restartStorageContainerManager(scm, failureMode);
+          cluster.waitForClusterToBeReady();
+        } catch (Throwable t) {
+          LOG.error("Failed to restartNodes SCM {}", scm, t);
+        }
+      });
+    }
+  }
+
+  /**
    * Datanode failures.
    */
   public abstract static class DatanodeFailures extends Failures {
diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
index 0bf7ea4..b089712 100644
--- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
@@ -128,4 +128,12 @@
     <Class name="org.apache.hadoop.ozone.client.rpc.TestDeleteWithSlowFollower"/>
     <Bug pattern="ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.hdds.scm.TestSCMInstallSnapshot"/>
+    <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.ozone.scm.TestSCMInstallSnapshotWithHA"/>
+    <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
+  </Match>
 </FindBugsFilter>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index a86a9aa..9582a17 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -37,8 +37,8 @@
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.junit.After;
 import org.junit.Assert;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index baee60c..e13b50e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -26,12 +26,12 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -107,7 +107,7 @@
     omStore.initialize();
 
     // Start the cluster
-    cluster = MiniOzoneCluster.newHABuilder(conf)
+    cluster = MiniOzoneCluster.newOMHABuilder(conf)
         .setNumDatanodes(7)
         .setTotalPipelineNumLimit(10)
         .setClusterId(clusterId)
@@ -154,13 +154,13 @@
    * @return the leader OM's RPC address in the MiniOzoneHACluster
    */
   private String getLeaderOMNodeAddr() {
-    MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
+    MiniOzoneOMHAClusterImpl haCluster = (MiniOzoneOMHAClusterImpl) cluster;
     OzoneManager omLeader = haCluster.getOMLeader();
     Assert.assertNotNull("There should be a leader OM at this point.",
         omLeader);
     String omNodeId = omLeader.getOMNodeId();
     // omLeaderAddrKey=ozone.om.address.omServiceId.omNodeId
-    String omLeaderAddrKey = OmUtils.addKeySuffixes(
+    String omLeaderAddrKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId);
     String omLeaderAddr = conf.get(omLeaderAddrKey);
     LOG.info("OM leader: nodeId={}, {}={}", omNodeId, omLeaderAddrKey,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
new file mode 100644
index 0000000..f3682ed
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
+import org.apache.hadoop.hdds.scm.ha.SCMHAConfiguration;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMSnapshotProvider;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+
+/**
+ * Class to test install snapshot feature for SCM HA.
+ */
+public class TestSCMInstallSnapshot {
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+    SCMHAConfiguration scmhaConfiguration = conf.getObject(
+        SCMHAConfiguration.class);
+    scmhaConfiguration.setRatisSnapshotThreshold(1L);
+    scmhaConfiguration.setRatisSnapshotDir(
+        GenericTestUtils.getRandomizedTempPath() + "/snapshot");
+    conf.setFromObject(scmhaConfiguration);
+    cluster = MiniOzoneCluster
+        .newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testDownloadSnapshot() throws Exception {
+    downloadSnapshot();
+  }
+
+  private DBCheckpoint downloadSnapshot() throws Exception {
+    StorageContainerManager scm = cluster.getStorageContainerManager();
+    ContainerManagerV2 containerManager = scm.getContainerManager();
+    PipelineManager pipelineManager = scm.getPipelineManager();
+    Pipeline ratisPipeline1 = pipelineManager.getPipeline(
+        containerManager.allocateContainer(
+            RATIS, THREE, "Owner1").getPipelineID());
+    pipelineManager.openPipeline(ratisPipeline1.getId());
+    Pipeline ratisPipeline2 = pipelineManager.getPipeline(
+        containerManager.allocateContainer(
+            RATIS, ONE, "Owner2").getPipelineID());
+    pipelineManager.openPipeline(ratisPipeline2.getId());
+    SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder()
+        .setRpcAddress(new InetSocketAddress("0.0.0.0", 0)).setSCMNodeId("scm1")
+        .build();
+    Map<String, SCMNodeDetails> peerMap = new HashMap<>();
+    peerMap.put(scmNodeDetails.getNodeId(), scmNodeDetails);
+    SCMSnapshotProvider provider =
+        scm.getScmHAManager().getSCMSnapshotProvider();
+    provider.setPeerNodesMap(peerMap);
+    DBCheckpoint checkpoint =
+        provider.getSCMDBSnapshot(scmNodeDetails.getNodeId());
+    final File[] files = FileUtil.listFiles(provider.getScmSnapshotDir());
+    Assert.assertTrue(files[0].getName().startsWith(
+        OzoneConsts.SCM_DB_NAME + "-" + scmNodeDetails.getNodeId()));
+    return checkpoint;
+  }
+
+  @Test
+  public void testInstallCheckPoint() throws Exception {
+    DBCheckpoint checkpoint = downloadSnapshot();
+    StorageContainerManager scm = cluster.getStorageContainerManager();
+    SCMHAManagerImpl scmhaManager = (SCMHAManagerImpl)scm.getScmHAManager();
+    DBStore db = HAUtils
+        .loadDB(conf, checkpoint.getCheckpointLocation().getParent().toFile(),
+            checkpoint.getCheckpointLocation().getFileName().toString(),
+            new SCMDBDefinition());
+    // Hack the transaction index in the checkpoint so as to ensure the
+    // checkpointed transaction index is higher than when it was downloaded
+    // from.
+    Assert.assertNotNull(db);
+    HAUtils.getTransactionInfoTable(db, new SCMDBDefinition())
+        .put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.builder()
+            .setCurrentTerm(1).setTransactionIndex(100).build());
+    db.close();
+    ContainerID cid =
+        scm.getContainerManager().getContainers().get(0).containerID();
+    PipelineID pipelineID =
+        scm.getPipelineManager().getPipelines().get(0).getId();
+    scm.getScmMetadataStore().getPipelineTable().delete(pipelineID);
+    scm.getContainerManager().deleteContainer(cid);
+    Assert.assertNull(
+        scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
+    Assert.assertFalse(scm.getContainerManager().containerExist(cid));
+    scmhaManager.installCheckpoint("scm1", checkpoint);
+
+    Assert.assertNotNull(
+        scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
+    Assert.assertNotNull(
+        scm.getScmMetadataStore().getContainerTable().get(cid));
+    Assert.assertTrue(scm.getPipelineManager().containsPipeline(pipelineID));
+    Assert.assertTrue(scm.getContainerManager().containerExist(cid));
+    Assert.assertEquals(100, scm.getScmMetadataStore().
+        getTransactionInfoTable().get(OzoneConsts.TRANSACTION_INFO_KEY)
+        .getTransactionIndex());
+    Assert.assertEquals(100,
+        scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo()
+            .getTermIndex().getIndex());
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
new file mode 100644
index 0000000..0c3fbcc
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
+import org.apache.hadoop.hdds.scm.ha.SCMHAConfiguration;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+
+public class TestSCMSnapshot {
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+    SCMHAConfiguration scmhaConfiguration = conf.getObject(
+        SCMHAConfiguration.class);
+    scmhaConfiguration.setRatisSnapshotThreshold(1L);
+    conf.setFromObject(scmhaConfiguration);
+    cluster = MiniOzoneCluster
+        .newBuilder(conf)
+        .setNumDatanodes(3)
+        .setScmId(UUID.randomUUID().toString())
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @Test
+  public void testSnapshot() throws Exception {
+    StorageContainerManager scm = cluster.getStorageContainerManager();
+    long snapshotInfo1 = scm.getScmHAManager().asSCMHADBTransactionBuffer()
+        .getLatestTrxInfo().getTransactionIndex();
+    ContainerManagerV2 containerManager = scm.getContainerManager();
+    PipelineManager pipelineManager = scm.getPipelineManager();
+    Pipeline ratisPipeline1 = pipelineManager.getPipeline(
+        containerManager.allocateContainer(
+            RATIS, THREE, "Owner1").getPipelineID());
+    pipelineManager.openPipeline(ratisPipeline1.getId());
+    Pipeline ratisPipeline2 = pipelineManager.getPipeline(
+        containerManager.allocateContainer(
+            RATIS, ONE, "Owner2").getPipelineID());
+    pipelineManager.openPipeline(ratisPipeline2.getId());
+    long snapshotInfo2 = scm.getScmHAManager().asSCMHADBTransactionBuffer()
+        .getLatestTrxInfo().getTransactionIndex();
+
+    Assert.assertTrue(
+        String.format("Snapshot index 2 {} should greater than Snapshot " +
+            "index 1 {}", snapshotInfo2, snapshotInfo1),
+        snapshotInfo2 > snapshotInfo1);
+
+    cluster.restartStorageContainerManager(false);
+    TransactionInfo trxInfoAfterRestart =
+        scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo();
+    Assert.assertTrue(
+        trxInfoAfterRestart.getTransactionIndex() >= snapshotInfo2);
+    try {
+      pipelineManager.getPipeline(ratisPipeline1.getId());
+      pipelineManager.getPipeline(ratisPipeline2.getId());
+    } catch (PipelineNotFoundException e) {
+      Assert.fail("Should not see a PipelineNotFoundException");
+    }
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 3842818..2768225 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.junit.After;
@@ -64,7 +65,7 @@
   private MiniOzoneCluster cluster;
   private XceiverClientManager xceiverClientManager;
   private StorageContainerManager scm;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private ContainerStateManager containerStateManager;
   private int numContainerPerOwnerInPipeline;
 
@@ -151,7 +152,8 @@
 
   @Test
   public void testContainerStateManagerRestart() throws IOException,
-      TimeoutException, InterruptedException, AuthenticationException {
+      TimeoutException, InterruptedException, AuthenticationException,
+      InvalidStateTransitionException {
     // Allocate 5 containers in ALLOCATED state and 5 in CREATING state
 
     for (int i = 0; i < 10; i++) {
@@ -172,7 +174,7 @@
     cluster.restartStorageContainerManager(false);
 
     List<ContainerInfo> result = cluster.getStorageContainerManager()
-        .getContainerManager().listContainer(null, 100);
+        .getContainerManager().getContainers(null, 100);
 
     long matchCount = result.stream()
         .filter(info ->
@@ -252,7 +254,7 @@
     ContainerInfo info = containerManager
         .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline(),
-            new HashSet<>(Collections.singletonList(new ContainerID(1))));
+            new HashSet<>(Collections.singletonList(ContainerID.valueOf(1))));
     Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
         info.getContainerID());
   }
@@ -277,8 +279,8 @@
     ContainerInfo info = containerManager
         .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
             container1.getPipeline(),
-            new HashSet<>(Arrays.asList(new ContainerID(1), new
-                ContainerID(2), new ContainerID(3))));
+            new HashSet<>(Arrays.asList(ContainerID.valueOf(1),
+                ContainerID.valueOf(2), ContainerID.valueOf(3))));
     Assert.assertEquals(info.getContainerID(), 4);
   }
 
@@ -325,7 +327,8 @@
   }
 
   @Test
-  public void testUpdateContainerState() throws IOException {
+  public void testUpdateContainerState() throws IOException,
+      InvalidStateTransitionException {
     NavigableSet<ContainerID> containerList = containerStateManager
         .getMatchingContainerIDs(OzoneConsts.OZONE,
             SCMTestUtils.getReplicationType(conf),
@@ -418,7 +421,7 @@
         .setUuid(UUID.randomUUID()).build();
 
     // Test 1: no replica's exist
-    ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong());
+    ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong());
     Set<ContainerReplica> replicaSet;
     try {
       containerStateManager.getContainerReplicas(containerID);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index 21b5678..3484474 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -24,7 +24,8 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -82,7 +83,7 @@
   @Test
   public void testContainerOpsMetrics() throws IOException {
     MetricsRecordBuilder metrics;
-    ContainerManager containerManager = scm.getContainerManager();
+    ContainerManagerV2 containerManager = scm.getContainerManager();
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     long numSuccessfulCreateContainers = getLongCounter(
         "NumSuccessfulCreateContainers", metrics);
@@ -114,19 +115,18 @@
         "NumSuccessfulDeleteContainers", metrics);
 
     containerManager.deleteContainer(
-        new ContainerID(containerInfo.getContainerID()));
+        ContainerID.valueOf(containerInfo.getContainerID()));
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
         metrics), numSuccessfulDeleteContainers + 1);
 
-
     try {
       // Give random container to delete.
       containerManager.deleteContainer(
-          new ContainerID(RandomUtils.nextLong(10000, 20000)));
+          ContainerID.valueOf(RandomUtils.nextLong(10000, 20000)));
       fail("testContainerOpsMetrics failed");
-    } catch (IOException ex) {
+    } catch (ContainerNotFoundException ex) {
       // Here it should fail, so it should have the old metric value.
       metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
       Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
@@ -135,11 +135,12 @@
           metrics), 1);
     }
 
-    containerManager.listContainer(
-        new ContainerID(containerInfo.getContainerID()), 1);
+    long currentValue = getLongCounter("NumListContainerOps", metrics);
+    containerManager.getContainers(
+        ContainerID.valueOf(containerInfo.getContainerID()), 1);
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumListContainerOps",
-        metrics), 1);
+    Assert.assertEquals(currentValue + 1,
+        getLongCounter("NumListContainerOps", metrics));
 
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index 08cc975..0ecad42 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -182,7 +182,7 @@
 
       int destroyNum = r.nextInt(pipelines.size());
       for (int k = 0; k <= destroyNum; k++) {
-        pipelineManager.finalizeAndDestroyPipeline(pipelines.get(k), false);
+        pipelineManager.closePipeline(pipelines.get(k), false);
       }
 
       waitForPipelines(pipelineNum);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index bd1badc..1ed7a73 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -22,11 +22,12 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -58,7 +59,7 @@
   private OzoneConfiguration conf;
   private StorageContainerManager scm;
   private ContainerWithPipeline ratisContainer;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private PipelineManager pipelineManager;
 
   /**
@@ -92,7 +93,8 @@
   }
 
   @Test
-  public void testPipelineMap() throws IOException {
+  public void testPipelineMap() throws IOException,
+      InvalidStateTransitionException {
 
     Set<ContainerID> set = pipelineManager
         .getContainersInPipeline(ratisContainer.getPipeline().getId());
@@ -121,7 +123,7 @@
     Assert.assertEquals(0, set2.size());
 
     pipelineManager
-        .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false);
+        .closePipeline(ratisContainer.getPipeline(), false);
     pipelines = scm.getScmNodeManager()
         .getPipelines(dns.get(0));
     Assert
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index f308891..aba5ab5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -27,15 +27,17 @@
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -74,7 +76,7 @@
   private OzoneConfiguration conf;
   private StorageContainerManager scm;
   private ContainerWithPipeline ratisContainer;
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private PipelineManager pipelineManager;
 
   private long pipelineDestroyTimeoutInMillis;
@@ -117,7 +119,8 @@
   }
 
   @Test
-  public void testPipelineCloseWithClosedContainer() throws IOException {
+  public void testPipelineCloseWithClosedContainer() throws IOException,
+      InvalidStateTransitionException {
     Set<ContainerID> set = pipelineManager
         .getContainersInPipeline(ratisContainer.getPipeline().getId());
 
@@ -137,7 +140,7 @@
     Assert.assertEquals(0, setClosed.size());
 
     pipelineManager
-        .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false);
+        .closePipeline(ratisContainer.getPipeline(), false);
     for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) {
       // Assert that the pipeline has been removed from Node2PipelineMap as well
       Assert.assertFalse(scm.getScmNodeManager().getPipelines(dn)
@@ -153,7 +156,7 @@
     Assert.assertEquals(1, setOpen.size());
 
     pipelineManager
-        .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false);
+        .closePipeline(ratisContainer.getPipeline(), false);
     GenericTestUtils.waitFor(() -> {
       try {
         return containerManager
@@ -174,7 +177,8 @@
             ratisContainer.getPipeline().getId());
     // send closing action for pipeline
     PipelineActionHandler pipelineActionHandler =
-        new PipelineActionHandler(pipelineManager, conf);
+        new PipelineActionHandler(
+            pipelineManager, SCMContext.emptyContext(), conf);
     pipelineActionHandler
         .onMessage(pipelineActionsFromDatanode, new EventQueue());
     Thread.sleep(5000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
index 785e494..97d2588 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.HddsDatanodeService;
@@ -87,7 +88,7 @@
         .getPipelines(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN);
     for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+      pipelineManager.closePipeline(pipeline, false);
     }
     // make sure two pipelines are created
     waitForPipelines(2);
@@ -109,7 +110,7 @@
         .getPipelines(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN);
     for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+      pipelineManager.closePipeline(pipeline, false);
     }
 
     // make sure two pipelines are created
@@ -153,14 +154,16 @@
 
     // destroy the existing pipelines
     for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+      pipelineManager.closePipeline(pipeline, false);
     }
 
     if (cluster.getStorageContainerManager()
         .getScmNodeManager().getNodeCount(NodeStatus.inServiceHealthy()) >=
         HddsProtos.ReplicationFactor.THREE.getNumber()) {
       // make sure pipelines is created after node start
-      pipelineManager.triggerPipelineCreation();
+      cluster.getStorageContainerManager()
+          .getSCMServiceManager()
+          .notifyEventTriggered(Event.PRE_CHECK_COMPLETED);
       waitForPipelines(1);
     }
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index 3760e17..7466975 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -56,8 +56,8 @@
   private static OzoneConfiguration conf;
   private static Pipeline ratisPipeline1;
   private static Pipeline ratisPipeline2;
-  private static ContainerManager containerManager;
-  private static ContainerManager newContainerManager;
+  private static ContainerManagerV2 containerManager;
+  private static ContainerManagerV2 newContainerManager;
   private static PipelineManager pipelineManager;
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 87b2679..153b312 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -60,6 +60,10 @@
    *
    * @return MiniOzoneCluster builder
    */
+  static Builder newOMHABuilder(OzoneConfiguration conf) {
+    return new MiniOzoneOMHAClusterImpl.Builder(conf);
+  }
+
   static Builder newHABuilder(OzoneConfiguration conf) {
     return new MiniOzoneHAClusterImpl.Builder(conf);
   }
@@ -115,7 +119,15 @@
    *
    * @return Service ID String
    */
-  String getServiceId();
+  String getOMServiceId();
+
+
+  /**
+   * Returns StorageContainerManager Service ID.
+   *
+   * @return Service ID String
+   */
+  String getSCMServiceId();
 
   /**
    * Returns {@link StorageContainerManager} associated with this
@@ -274,6 +286,7 @@
     protected static final int DEFAULT_HB_INTERVAL_MS = 1000;
     protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100;
     protected static final int ACTIVE_OMS_NOT_SET = -1;
+    protected static final int ACTIVE_SCMS_NOT_SET = -1;
     protected static final int DEFAULT_PIPELIME_LIMIT = 3;
     protected static final int DEFAULT_RATIS_RPC_TIMEOUT_SEC = 1;
 
@@ -285,6 +298,10 @@
     protected int numOfOMs;
     protected int numOfActiveOMs = ACTIVE_OMS_NOT_SET;
 
+    protected String scmServiceId;
+    protected int numOfSCMs;
+    protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET;
+
     protected Optional<Boolean> enableTrace = Optional.of(false);
     protected Optional<Integer> hbInterval = Optional.empty();
     protected Optional<Integer> hbProcessorInterval = Optional.empty();
@@ -524,6 +541,21 @@
       return this;
     }
 
+    public Builder setNumOfStorageContainerManagers(int numSCMs) {
+      this.numOfSCMs = numSCMs;
+      return this;
+    }
+
+    public Builder setNumOfActiveSCMs(int numActiveSCMs) {
+      this.numOfActiveSCMs = numActiveSCMs;
+      return this;
+    }
+
+    public Builder setSCMServiceId(String serviceId) {
+      this.scmServiceId = serviceId;
+      return this;
+    }
+
     /**
      * Constructs and returns MiniOzoneCluster.
      *
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 4e046ab..b0ce55d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -30,6 +30,7 @@
 import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.UUID;
+import java.util.Iterator;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
@@ -42,18 +43,19 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.scm.safemode.HealthyPipelineSafeModeRule;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
@@ -64,7 +66,6 @@
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.recon.ConfigurationProvider;
 import org.apache.hadoop.ozone.recon.ReconServer;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -148,7 +149,7 @@
 
   /**
    * Creates a new MiniOzoneCluster without the OzoneManager. This is used by
-   * {@link MiniOzoneHAClusterImpl} for starting multiple OzoneManagers.
+   * {@link MiniOzoneOMHAClusterImpl} for starting multiple OzoneManagers.
    *
    * @param conf
    * @param scm
@@ -162,27 +163,61 @@
     this.reconServer = reconServer;
   }
 
+  /**
+   * Creates a new MiniOzoneCluster without the OzoneManager and
+   * StorageContainerManager. This is used by
+   * {@link MiniOzoneHAClusterImpl} for starting multiple
+   * OzoneManagers and StorageContainerManagers.
+   *
+   * @param conf
+   * @param hddsDatanodes
+   */
+  MiniOzoneClusterImpl(OzoneConfiguration conf,
+      List<HddsDatanodeService> hddsDatanodes, ReconServer reconServer) {
+    this.conf = conf;
+    this.hddsDatanodes = hddsDatanodes;
+    this.reconServer = reconServer;
+  }
+
   @Override
   public OzoneConfiguration getConf() {
     return conf;
   }
 
   @Override
-  public String getServiceId() {
+  public String getOMServiceId() {
     // Non-HA cluster doesn't have OM Service Id.
     return null;
   }
 
+  @Override
+  public String getSCMServiceId() {
+    // Non-HA cluster doesn't have OM Service Id.
+    return null;
+  }
+
+  public void waitForSCMToBeReady() throws TimeoutException,
+      InterruptedException {
+    // Nothing implemented here
+  }
+
+  public StorageContainerManager getActiveSCM() {
+    return scm;
+  }
+
   /**
    * Waits for the Ozone cluster to be ready for processing requests.
    */
   @Override
   public void waitForClusterToBeReady()
       throws TimeoutException, InterruptedException {
+    waitForSCMToBeReady();
     GenericTestUtils.waitFor(() -> {
-      final int healthy = scm.getNodeCount(HEALTHY);
+      StorageContainerManager activeScm = getActiveSCM();
+      final int healthy = activeScm.getNodeCount(HEALTHY);
       final boolean isNodeReady = healthy == hddsDatanodes.size();
-      final boolean exitSafeMode = !scm.isInSafeMode();
+      final boolean exitSafeMode = !activeScm.isInSafeMode();
+      final boolean checkScmLeader = activeScm.checkLeader();
 
       LOG.info("{}. Got {} of {} DN Heartbeats.",
           isNodeReady ? "Nodes are ready" : "Waiting for nodes to be ready",
@@ -190,8 +225,10 @@
       LOG.info(exitSafeMode ? "Cluster exits safe mode" :
               "Waiting for cluster to exit safe mode",
           healthy, hddsDatanodes.size());
+      LOG.info(checkScmLeader ? "SCM became leader" :
+          "SCM has not become leader");
 
-      return isNodeReady && exitSafeMode;
+      return isNodeReady && exitSafeMode && checkScmLeader;
     }, 1000, waitForClusterToBeReadyTimeout);
   }
 
@@ -292,27 +329,27 @@
    */
   @Override
   public StorageContainerLocationProtocolClientSideTranslatorPB
-      getStorageContainerLocationClient() throws IOException {
-    long version = RPC.getProtocolVersion(
-        StorageContainerLocationProtocolPB.class);
+      getStorageContainerLocationClient() {
     InetSocketAddress address = scm.getClientRpcAddress();
     LOG.info(
         "Creating StorageContainerLocationProtocol RPC client with address {}",
         address);
+
+    SCMContainerLocationFailoverProxyProvider proxyProvider =
+        new SCMContainerLocationFailoverProxyProvider(conf);
+
     return new StorageContainerLocationProtocolClientSideTranslatorPB(
-        RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
-            address, UserGroupInformation.getCurrentUser(), conf,
-            NetUtils.getDefaultSocketFactory(conf),
-            Client.getRpcTimeout(conf)));
+        proxyProvider);
   }
 
   @Override
   public void restartStorageContainerManager(boolean waitForDatanode)
       throws TimeoutException, InterruptedException, IOException,
       AuthenticationException {
+    LOG.info("Restarting SCM in cluster " + this.getClass());
     scm.stop();
     scm.join();
-    scm = StorageContainerManager.createSCM(conf);
+    scm = TestUtils.getScmSimple(conf);
     scm.start();
     if (waitForDatanode) {
       waitForClusterToBeReady();
@@ -394,13 +431,17 @@
     shutdownHddsDatanode(getHddsDatanodeIndex(dn));
   }
 
+  public String getClusterId() throws IOException {
+    return scm.getClientProtocolServer().getScmInfo().getClusterId();
+  }
+
   @Override
   public void shutdown() {
     try {
       LOG.info("Shutting down the Mini Ozone Cluster");
       File baseDir = new File(GenericTestUtils.getTempPath(
           MiniOzoneClusterImpl.class.getSimpleName() + "-" +
-              scm.getClientProtocolServer().getScmInfo().getClusterId()));
+              getClusterId()));
       stop();
       FileUtils.deleteDirectory(baseDir);
       ContainerCache.getInstance(conf).shutdownCache();
@@ -549,7 +590,8 @@
           reconServer.execute(new String[] {});
         }
 
-        hddsDatanodes = createHddsDatanodes(scm, reconServer);
+        hddsDatanodes = createHddsDatanodes(
+            Collections.singletonList(scm), reconServer);
 
         MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, om, scm,
             hddsDatanodes, reconServer);
@@ -589,6 +631,7 @@
       Path metaDir = Paths.get(path, "ozone-meta");
       Files.createDirectories(metaDir);
       conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
+     // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
       if (!chunkSize.isPresent()) {
         //set it to 1MB by default in tests
         chunkSize = Optional.of(1);
@@ -649,7 +692,7 @@
       configureSCM();
       SCMStorageConfig scmStore = new SCMStorageConfig(conf);
       initializeScmStorage(scmStore);
-      StorageContainerManager scm = StorageContainerManager.createSCM(conf);
+      StorageContainerManager scm = TestUtils.getScmSimple(conf);
       HealthyPipelineSafeModeRule rule =
           scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule();
       if (rule != null) {
@@ -660,7 +703,7 @@
       return scm;
     }
 
-    private void initializeScmStorage(SCMStorageConfig scmStore)
+    protected void initializeScmStorage(SCMStorageConfig scmStore)
         throws IOException {
       if (scmStore.getState() == StorageState.INITIALIZED) {
         return;
@@ -671,6 +714,10 @@
       }
       scmStore.setScmId(scmId.get());
       scmStore.initialize();
+      if (SCMHAUtils.isSCMHAEnabled(conf)) {
+        SCMRatisServerImpl.initialize(clusterId, scmId.get(),
+            SCMHANodeDetails.loadSCMHAConfig(conf).getLocalNodeDetails(), conf);
+      }
     }
 
     void initializeOmStorage(OMStorage omStorage) throws IOException {
@@ -678,7 +725,9 @@
         return;
       }
       omStorage.setClusterId(clusterId);
-      omStorage.setScmId(scmId.get());
+      if (scmId.isPresent()) {
+        omStorage.setScmId(scmId.get());
+      }
       omStorage.setOmId(omId.orElse(UUID.randomUUID().toString()));
       // Initialize ozone certificate client if security is enabled.
       if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
@@ -701,6 +750,21 @@
       return OzoneManager.createOm(conf);
     }
 
+    protected String getSCMAddresses(List<StorageContainerManager> scms) {
+      StringBuilder stringBuilder = new StringBuilder();
+      Iterator<StorageContainerManager> iter = scms.iterator();
+
+      while (iter.hasNext()) {
+        StorageContainerManager scm = iter.next();
+        stringBuilder.append(scm.getDatanodeRpcAddress().getHostString() +
+            ":" + scm.getDatanodeRpcAddress().getPort());
+        if (iter.hasNext()) {
+          stringBuilder.append(",");
+        }
+
+      }
+      return stringBuilder.toString();
+    }
     /**
      * Creates HddsDatanodeService(s) instance.
      *
@@ -708,11 +772,10 @@
      * @throws IOException
      */
     protected List<HddsDatanodeService> createHddsDatanodes(
-        StorageContainerManager scm, ReconServer reconServer)
+        List<StorageContainerManager> scms, ReconServer reconServer)
         throws IOException {
       configureHddsDatanodes();
-      String scmAddress = scm.getDatanodeRpcAddress().getHostString() +
-          ":" + scm.getDatanodeRpcAddress().getPort();
+      String scmAddress = getSCMAddresses(scms);
       String[] args = new String[] {};
       conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress);
       List<HddsDatanodeService> hddsDatanodes = new ArrayList<>();
@@ -755,7 +818,7 @@
       return hddsDatanodes;
     }
 
-    private void configureSCM() {
+    protected void configureSCM() {
       conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index 4df5f22..bd6e4b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -21,15 +21,17 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.safemode.HealthyPipelineSafeModeRule;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.recon.ReconServer;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -43,30 +45,27 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.Function;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 
 /**
  * MiniOzoneHAClusterImpl creates a complete in-process Ozone cluster
- * with OM HA suitable for running tests.  The cluster consists of a set of
- * OzoneManagers, StorageContainerManager and multiple DataNodes.
+ * with OM HA and SCM HA suitable for running tests.
+ * The cluster consists of a set of
+ * OzoneManagers, StorageContainerManagers and multiple DataNodes.
  */
 public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(MiniOzoneHAClusterImpl.class);
 
-  private Map<String, OzoneManager> ozoneManagerMap;
-  private List<OzoneManager> ozoneManagers;
-  private String omServiceId;
+  private final OMHAService omhaService;
+  private final SCMHAService scmhaService;
 
-  // Active OMs denote OMs which are up and running
-  private List<OzoneManager> activeOMs;
-  private List<OzoneManager> inactiveOMs;
-
-  private int waitForOMToBeReadyTimeout = 120000; // 2 min
+  private int waitForClusterToBeReadyTimeout = 120000; // 2 min
 
   private static final Random RANDOM = new Random();
   private static final int RATIS_RPC_TIMEOUT = 1000; // 1 second
@@ -78,36 +77,21 @@
    * @throws IOException if there is an I/O error
    */
   @SuppressWarnings("checkstyle:ParameterNumber")
-  private MiniOzoneHAClusterImpl(
+  public MiniOzoneHAClusterImpl(
       OzoneConfiguration conf,
       List<OzoneManager> activeOMList,
       List<OzoneManager> inactiveOMList,
-      StorageContainerManager scm,
+      List<StorageContainerManager> activeSCMList,
+      List<StorageContainerManager> inactiveSCMList,
       List<HddsDatanodeService> hddsDatanodes,
       String omServiceId,
+      String scmServiceId,
       ReconServer reconServer) {
-    super(conf, scm, hddsDatanodes, reconServer);
-
-    this.ozoneManagerMap = Maps.newHashMap();
-    if (activeOMList != null) {
-      for (OzoneManager om : activeOMList) {
-        this.ozoneManagerMap.put(om.getOMNodeId(), om);
-      }
-    }
-    if (inactiveOMList != null) {
-      for (OzoneManager om : inactiveOMList) {
-        this.ozoneManagerMap.put(om.getOMNodeId(), om);
-      }
-    }
-    this.ozoneManagers = new ArrayList<>(ozoneManagerMap.values());
-    this.activeOMs = activeOMList;
-    this.inactiveOMs = inactiveOMList;
-    this.omServiceId = omServiceId;
-
-    // If the serviceID is null, then this should be a non-HA cluster.
-    if (omServiceId == null) {
-      Preconditions.checkArgument(ozoneManagers.size() <= 1);
-    }
+    super(conf, hddsDatanodes, reconServer);
+    omhaService =
+        new OMHAService(activeOMList, inactiveOMList, omServiceId);
+    scmhaService =
+        new SCMHAService(activeSCMList, inactiveSCMList, scmServiceId);
   }
 
   /**
@@ -117,15 +101,22 @@
   protected MiniOzoneHAClusterImpl(
       OzoneConfiguration conf,
       List<OzoneManager> omList,
-      StorageContainerManager scm,
+      List<StorageContainerManager> scmList,
       List<HddsDatanodeService> hddsDatanodes,
-      String omServiceId) {
-    this(conf, omList, null, scm, hddsDatanodes, omServiceId, null);
+      String omServiceId,
+      String scmServiceId) {
+    this(conf, omList, null, scmList, null, hddsDatanodes,
+        omServiceId, scmServiceId, null);
   }
 
   @Override
-  public String getServiceId() {
-    return omServiceId;
+  public String getOMServiceId() {
+    return omhaService.getServiceId();
+  }
+
+  @Override
+  public String getSCMServiceId() {
+    return scmhaService.getServiceId();
   }
 
   /**
@@ -134,11 +125,12 @@
    */
   @Override
   public OzoneManager getOzoneManager() {
-    return this.ozoneManagers.get(0);
+    return this.omhaService.getServices().get(0);
   }
 
   @Override
   public OzoneClient getRpcClient() throws IOException {
+    String omServiceId = omhaService.getServiceId();
     if (omServiceId == null) {
       // Non-HA cluster.
       return OzoneClientFactory.getRpcClient(getConf());
@@ -149,19 +141,35 @@
   }
 
   public boolean isOMActive(String omNodeId) {
-    return activeOMs.contains(ozoneManagerMap.get(omNodeId));
+    return omhaService.isServiceActive(omNodeId);
+  }
+
+  public boolean isSCMActive(String scmNodeId) {
+    return scmhaService.isServiceActive(scmNodeId);
+  }
+
+  public StorageContainerManager getSCM(String scmNodeId) {
+    return this.scmhaService.getServiceById(scmNodeId);
   }
 
   public OzoneManager getOzoneManager(int index) {
-    return this.ozoneManagers.get(index);
+    return this.omhaService.getServiceByIndex(index);
   }
 
   public OzoneManager getOzoneManager(String omNodeId) {
-    return this.ozoneManagerMap.get(omNodeId);
+    return this.omhaService.getServiceById(omNodeId);
   }
 
   public List<OzoneManager> getOzoneManagersList() {
-    return ozoneManagers;
+    return omhaService.getServices();
+  }
+
+  public List<StorageContainerManager> getStorageContainerManagersList() {
+    return scmhaService.getServices();
+  }
+
+  public StorageContainerManager getStorageContainerManager(int index) {
+    return this.scmhaService.getServiceByIndex(index);
   }
 
   /**
@@ -170,7 +178,7 @@
    */
   public OzoneManager getOMLeader() {
     OzoneManager res = null;
-    for (OzoneManager ozoneManager : this.ozoneManagers) {
+    for (OzoneManager ozoneManager : this.omhaService.getServices()) {
       if (ozoneManager.isLeaderReady()) {
         if (res != null) {
           // Found more than one leader
@@ -188,19 +196,20 @@
    * Start a previously inactive OM.
    */
   public void startInactiveOM(String omNodeID) throws IOException {
-    OzoneManager ozoneManager = ozoneManagerMap.get(omNodeID);
-    if (!inactiveOMs.contains(ozoneManager)) {
-      throw new IOException("OM is already active.");
-    } else {
-      ozoneManager.start();
-      activeOMs.add(ozoneManager);
-      inactiveOMs.remove(ozoneManager);
-    }
+    omhaService.startInactiveService(omNodeID, OzoneManager::start);
+  }
+
+  /**
+   * Start a previously inactive SCM.
+   */
+  public void startInactiveSCM(String scmNodeId) throws IOException {
+    scmhaService
+        .startInactiveService(scmNodeId, StorageContainerManager::start);
   }
 
   @Override
   public void restartOzoneManager() throws IOException {
-    for (OzoneManager ozoneManager : ozoneManagers) {
+    for (OzoneManager ozoneManager : this.omhaService.getServices()) {
       ozoneManager.stop();
       ozoneManager.restart();
     }
@@ -219,30 +228,86 @@
 
     if (waitForOM) {
       GenericTestUtils.waitFor(ozoneManager::isRunning,
-          1000, waitForOMToBeReadyTimeout);
+          1000, waitForClusterToBeReadyTimeout);
     }
   }
 
+  public void shutdownStorageContainerManager(StorageContainerManager scm) {
+    LOG.info("Shutting down StorageContainerManager " + scm.getScmId());
+
+    scm.stop();
+    scmhaService.removeInstance(scm);
+  }
+
+  public void restartStorageContainerManager(StorageContainerManager scm,
+      boolean waitForSCM) throws IOException, TimeoutException,
+      InterruptedException, AuthenticationException {
+    LOG.info("Restarting SCM in cluster " + this.getClass());
+    OzoneConfiguration scmConf = scm.getConfiguration();
+    shutdownStorageContainerManager(scm);
+    scm.join();
+    scm = TestUtils.getScmSimple(scmConf);
+    scmhaService.addInstance(scm);
+    scm.start();
+    if (waitForSCM) {
+      waitForClusterToBeReady();
+    }
+  }
+
+  public String getClusterId() throws IOException {
+    return scmhaService.getServices().get(0)
+        .getClientProtocolServer().getScmInfo().getClusterId();
+  }
+
+  public StorageContainerManager getActiveSCM() {
+    for (StorageContainerManager scm : scmhaService.getServices()) {
+      if (scm.checkLeader()) {
+        return scm;
+      }
+    }
+    return null;
+  }
+
+  public void waitForSCMToBeReady()
+      throws TimeoutException, InterruptedException  {
+    GenericTestUtils.waitFor(() -> {
+      for (StorageContainerManager scm : scmhaService.getServices()) {
+        if (scm.checkLeader()) {
+          return true;
+        }
+      }
+      return false;
+    }, 1000, waitForClusterToBeReadyTimeout);
+  }
+
   @Override
   public void stop() {
-    for (OzoneManager ozoneManager : ozoneManagers) {
+    for (OzoneManager ozoneManager : this.omhaService.getServices()) {
       if (ozoneManager != null) {
         LOG.info("Stopping the OzoneManager {}", ozoneManager.getOMNodeId());
         ozoneManager.stop();
         ozoneManager.join();
       }
     }
+
+    for (StorageContainerManager scm : this.scmhaService.getServices()) {
+      if (scm != null) {
+        LOG.info("Stopping the StorageContainerManager {}", scm.getScmId());
+        scm.stop();
+        scm.join();
+      }
+    }
     super.stop();
   }
 
   public void stopOzoneManager(int index) {
-    ozoneManagers.get(index).stop();
-    ozoneManagers.get(index).join();
+    omhaService.getServices().get(index).stop();
+    omhaService.getServices().get(index).join();
   }
 
   public void stopOzoneManager(String omNodeId) {
-    ozoneManagerMap.get(omNodeId).stop();
-    ozoneManagerMap.get(omNodeId).join();
+    omhaService.getServiceById(omNodeId).stop();
+    omhaService.getServiceById(omNodeId).join();
   }
 
   /**
@@ -250,10 +315,14 @@
    */
   public static class Builder extends MiniOzoneClusterImpl.Builder {
 
-    private static final String NODE_ID_PREFIX = "omNode-";
+    private static final String OM_NODE_ID_PREFIX = "omNode-";
     private List<OzoneManager> activeOMs = new ArrayList<>();
     private List<OzoneManager> inactiveOMs = new ArrayList<>();
 
+    private static final String SCM_NODE_ID_PREFIX = "scmNode-";
+    private List<StorageContainerManager> activeSCMs = new ArrayList<>();
+    private List<StorageContainerManager> inactiveSCMs = new ArrayList<>();
+
     /**
      * Creates a new Builder.
      *
@@ -263,6 +332,14 @@
       super(conf);
     }
 
+    public List<OzoneManager> getActiveOMs() {
+      return activeOMs;
+    }
+
+    public List<OzoneManager> getInactiveOMs() {
+      return inactiveOMs;
+    }
+
     @Override
     public MiniOzoneCluster build() throws IOException {
       if (numOfActiveOMs > numOfOMs) {
@@ -275,14 +352,18 @@
         numOfActiveOMs = numOfOMs;
       }
 
+      // If num of ActiveOMs is not set, set it to numOfOMs.
+      if (numOfActiveSCMs == ACTIVE_SCMS_NOT_SET) {
+        numOfActiveSCMs = numOfSCMs;
+      }
+
       DefaultMetricsSystem.setMiniClusterMode(true);
       initializeConfiguration();
       initOMRatisConf();
       StorageContainerManager scm;
       ReconServer reconServer = null;
       try {
-        scm = createSCM();
-        scm.start();
+        createSCMService();
         createOMService();
         if (includeRecon) {
           configureRecon();
@@ -294,10 +375,11 @@
       }
 
       final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(
-          scm, reconServer);
+          activeSCMs, reconServer);
 
       MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf,
-          activeOMs, inactiveOMs, scm, hddsDatanodes, omServiceId, reconServer);
+          activeOMs, inactiveOMs, activeSCMs, inactiveSCMs,
+          hddsDatanodes, omServiceId, scmServiceId, reconServer);
 
       if (startDataNodes) {
         cluster.startHddsDatanodes();
@@ -349,11 +431,11 @@
       while (true) {
         try {
           basePort = 10000 + RANDOM.nextInt(1000) * 4;
-          initHAConfig(basePort);
+          initOMHAConfig(basePort);
 
           for (int i = 1; i<= numOfOMs; i++) {
             // Set nodeId
-            String nodeId = NODE_ID_PREFIX + i;
+            String nodeId = OM_NODE_ID_PREFIX + i;
             OzoneConfiguration config = new OzoneConfiguration(conf);
             config.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeId);
             // Set the OM http(s) address to null so that the cluster picks
@@ -364,9 +446,9 @@
             // Set metadata/DB dir base path
             String metaDirPath = path + "/" + nodeId;
             config.set(OZONE_METADATA_DIRS, metaDirPath);
-            OMStorage omStore = new OMStorage(config);
-            initializeOmStorage(omStore);
-
+           // OMStorage omStore = new OMStorage(config);
+           // initializeOmStorage(omStore);
+            OzoneManager.omInit(config);
             OzoneManager om = OzoneManager.createOm(config);
             if (certClient != null) {
               om.setCertClient(certClient);
@@ -408,31 +490,160 @@
     }
 
     /**
+     * Start OM service with multiple OMs.
+     */
+    protected List<StorageContainerManager> createSCMService()
+        throws IOException, AuthenticationException {
+      List<StorageContainerManager> scmList = Lists.newArrayList();
+
+      int retryCount = 0;
+      int basePort = 12000;
+
+      while (true) {
+        try {
+          basePort = 12000 + RANDOM.nextInt(1000) * 4;
+          initSCMHAConfig(basePort);
+
+          for (int i = 1; i<= numOfSCMs; i++) {
+            // Set nodeId
+            String nodeId = SCM_NODE_ID_PREFIX + i;
+            String metaDirPath = path + "/" + nodeId;
+            OzoneConfiguration scmConfig = new OzoneConfiguration(conf);
+            scmConfig.set(OZONE_METADATA_DIRS, metaDirPath);
+            scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId);
+            scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+
+            configureSCM();
+            if (i == 1) {
+              StorageContainerManager.scmInit(scmConfig, clusterId);
+            } else {
+              StorageContainerManager.scmBootstrap(scmConfig);
+            }
+            StorageContainerManager scm = TestUtils.getScmSimple(scmConfig);
+            HealthyPipelineSafeModeRule rule =
+                scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule();
+            if (rule != null) {
+              // Set threshold to wait for safe mode exit -
+              // this is needed since a pipeline is marked open only after
+              // leader election.
+              rule.setHealthyPipelineThresholdCount(numOfDatanodes / 3);
+            }
+            scmList.add(scm);
+
+            if (i <= numOfActiveSCMs) {
+              scm.start();
+              activeSCMs.add(scm);
+              LOG.info("Started SCM RPC server at {}",
+                  scm.getClientProtocolServer());
+            } else {
+              inactiveSCMs.add(scm);
+              LOG.info("Intialized SCM at {}. This SCM is currently "
+                  + "inactive (not running).", scm.getClientProtocolServer());
+            }
+          }
+
+
+          break;
+        } catch (BindException e) {
+          for (StorageContainerManager scm : scmList) {
+            scm.stop();
+            scm.join();
+            LOG.info("Stopping StorageContainerManager server at {}",
+                scm.getClientProtocolServer());
+          }
+          scmList.clear();
+          ++retryCount;
+          LOG.info("MiniOzoneHACluster port conflicts, retried {} times",
+              retryCount);
+        }
+      }
+      return scmList;
+    }
+
+    /**
      * Initialize HA related configurations.
      */
-    private void initHAConfig(int basePort) throws IOException {
+    private void initSCMHAConfig(int basePort) throws IOException {
+      // Set configurations required for starting OM HA service, because that
+      // is the serviceID being passed to start Ozone HA cluster.
+      // Here setting internal service and OZONE_OM_SERVICE_IDS_KEY, in this
+      // way in OM start it uses internal service id to find it's service id.
+      conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
+      conf.set(ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID, scmServiceId);
+      String scmNodesKey = ConfUtils.addKeySuffixes(
+          ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId);
+      StringBuilder scmNodesKeyValue = new StringBuilder();
+      StringBuilder scmNames = new StringBuilder();
+
+      int port = basePort;
+
+      for (int i = 1; i <= numOfSCMs; i++, port+=10) {
+        String scmNodeId = SCM_NODE_ID_PREFIX + i;
+        scmNodesKeyValue.append(",").append(scmNodeId);
+        String scmAddrKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_ADDRESS_KEY, scmServiceId, scmNodeId);
+        String scmHttpAddrKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, scmServiceId, scmNodeId);
+        String scmHttpsAddrKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, scmServiceId, scmNodeId);
+        String scmRatisPortKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, scmServiceId, scmNodeId);
+        String dnPortKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
+            scmServiceId, scmNodeId);
+        String blockClientKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+            scmServiceId, scmNodeId);
+        String ssClientKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY,
+            scmServiceId, scmNodeId);
+        String scmGrpcPortKey = ConfUtils.addKeySuffixes(
+            ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, scmServiceId, scmNodeId);
+
+        conf.set(scmAddrKey, "127.0.0.1");
+        conf.set(scmHttpAddrKey, "127.0.0.1:" + (port + 2));
+        conf.set(scmHttpsAddrKey, "127.0.0.1:" + (port + 3));
+        conf.setInt(scmRatisPortKey, port + 4);
+        //conf.setInt("ozone.scm.ha.ratis.bind.port", port + 4);
+        conf.set(dnPortKey, "127.0.0.1:" + (port + 5));
+        conf.set(blockClientKey, "127.0.0.1:" + (port + 6));
+        conf.set(ssClientKey, "127.0.0.1:" + (port + 7));
+        conf.setInt(scmGrpcPortKey, port + 8);
+        scmNames.append(",").append("localhost:" + (port + 5));
+        conf.set(ScmConfigKeys.
+            OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:" + (port + 6));
+      }
+
+      conf.set(scmNodesKey, scmNodesKeyValue.substring(1));
+      conf.set(ScmConfigKeys.OZONE_SCM_NAMES, scmNames.substring(1));
+    }
+
+    /**
+     * Initialize HA related configurations.
+     */
+    private void initOMHAConfig(int basePort) throws IOException {
       // Set configurations required for starting OM HA service, because that
       // is the serviceID being passed to start Ozone HA cluster.
       // Here setting internal service and OZONE_OM_SERVICE_IDS_KEY, in this
       // way in OM start it uses internal service id to find it's service id.
       conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
       conf.set(OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID, omServiceId);
-      String omNodesKey = OmUtils.addKeySuffixes(
+      String omNodesKey = ConfUtils.addKeySuffixes(
           OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
       StringBuilder omNodesKeyValue = new StringBuilder();
 
       int port = basePort;
 
       for (int i = 1; i <= numOfOMs; i++, port+=6) {
-        String omNodeId = NODE_ID_PREFIX + i;
+        String omNodeId = OM_NODE_ID_PREFIX + i;
         omNodesKeyValue.append(",").append(omNodeId);
-        String omAddrKey = OmUtils.addKeySuffixes(
+        String omAddrKey = ConfUtils.addKeySuffixes(
             OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId);
-        String omHttpAddrKey = OmUtils.addKeySuffixes(
+        String omHttpAddrKey = ConfUtils.addKeySuffixes(
             OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId);
-        String omHttpsAddrKey = OmUtils.addKeySuffixes(
+        String omHttpsAddrKey = ConfUtils.addKeySuffixes(
             OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId);
-        String omRatisPortKey = OmUtils.addKeySuffixes(
+        String omRatisPortKey = ConfUtils.addKeySuffixes(
             OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId);
 
         conf.set(omAddrKey, "127.0.0.1:" + port);
@@ -444,4 +655,118 @@
       conf.set(omNodesKey, omNodesKeyValue.substring(1));
     }
   }
+
+  @FunctionalInterface
+  public interface CheckedConsumer<T> {
+    void apply(T t) throws IOException;
+  }
+
+  /**
+   * MiniOzoneHAService is a helper class used for both SCM and OM HA.
+   * This class keeps track of active and inactive OM/SCM services
+   * @param <Type>
+   */
+  static class MiniOzoneHAService<Type> {
+    private Map<String, Type> serviceMap;
+    private List<Type> services;
+    private String serviceId;
+    private String serviceName;
+
+    // Active services s denote OM/SCM services which are up and running
+    private List<Type> activeServices;
+    private List<Type> inactiveServices;
+
+    MiniOzoneHAService(String name, List<Type> activeList,
+        List<Type> inactiveList, String serviceId,
+        Function<Type, String> idProvider) {
+      this.serviceName = name;
+      this.serviceMap = Maps.newHashMap();
+      if (activeList != null) {
+        for (Type service : activeList) {
+          this.serviceMap.put(idProvider.apply(service), service);
+        }
+      }
+      if (inactiveList != null) {
+        for (Type service : inactiveList) {
+          this.serviceMap.put(idProvider.apply(service), service);
+        }
+      }
+      this.services = new ArrayList<>(serviceMap.values());
+      this.activeServices = activeList;
+      this.inactiveServices = inactiveList;
+      this.serviceId = serviceId;
+
+      // If the serviceID is null, then this should be a non-HA cluster.
+      if (serviceId == null) {
+        Preconditions.checkArgument(services.size() <= 1);
+      }
+    }
+
+    public String getServiceId() {
+      return serviceId;
+    }
+
+    public List<Type> getServices() {
+      return services;
+    }
+
+    public boolean removeInstance(Type t) {
+      return services.remove(t);
+    }
+
+    public boolean addInstance(Type t) {
+      return services.add(t);
+    }
+
+    public boolean isServiceActive(String id) {
+      return activeServices.contains(serviceMap.get(id));
+    }
+
+    public Type getServiceByIndex(int index) {
+      return this.services.get(index);
+    }
+
+    public Type getServiceById(String id) {
+      return this.serviceMap.get(id);
+    }
+
+    public void startInactiveService(String id,
+        CheckedConsumer<Type> serviceStarter) throws IOException {
+      Type service = serviceMap.get(id);
+      if (!inactiveServices.contains(service)) {
+        throw new IOException(serviceName + " is already active.");
+      } else {
+        serviceStarter.apply(service);
+        activeServices.add(service);
+        inactiveServices.remove(service);
+      }
+    }
+  }
+
+  static class OMHAService extends MiniOzoneHAService<OzoneManager> {
+    OMHAService(List<OzoneManager> activeList, List<OzoneManager> inactiveList,
+                String serviceId) {
+      super("OM", activeList, inactiveList, serviceId,
+          OzoneManager::getOMNodeId);
+    }
+  }
+
+  static class SCMHAService extends
+      MiniOzoneHAService<StorageContainerManager> {
+    SCMHAService(List<StorageContainerManager> activeList,
+                 List<StorageContainerManager> inactiveList,
+                 String serviceId) {
+      super("SCM", activeList, inactiveList, serviceId,
+          StorageContainerManager::getScmId);
+    }
+  }
+
+  public List<StorageContainerManager> getStorageContainerManagers() {
+    return this.scmhaService.getServices();
+  }
+
+  public StorageContainerManager getStorageContainerManager() {
+    return getStorageContainerManagers().get(0);
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java
new file mode 100644
index 0000000..a28c07a
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.recon.ReconServer;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Collections;
+
+/**
+ * MiniOzoneOMHAClusterImpl creates a complete in-process Ozone cluster
+ * with OM HA suitable for running tests.  The cluster consists of a set of
+ * OzoneManagers, StorageContainerManager and multiple DataNodes.
+ */
+public final class MiniOzoneOMHAClusterImpl extends MiniOzoneHAClusterImpl {
+  public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds
+
+  /**
+   * Creates a new MiniOzoneOMHACluster.
+   *
+   * @throws IOException if there is an I/O error
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private MiniOzoneOMHAClusterImpl(
+      OzoneConfiguration conf,
+      List<OzoneManager> activeOMList,
+      List<OzoneManager> inactiveOMList,
+      StorageContainerManager scm,
+      List<HddsDatanodeService> hddsDatanodes,
+      String omServiceId,
+      ReconServer reconServer) {
+    super(conf, activeOMList, inactiveOMList, Collections.singletonList(scm),
+        null, hddsDatanodes, omServiceId, null, reconServer);
+  }
+
+  /**
+   * Builder for configuring the MiniOzoneCluster to run.
+   */
+  public static class Builder extends MiniOzoneHAClusterImpl.Builder {
+
+    /**
+     * Creates a new Builder.
+     *
+     * @param conf configuration
+     */
+    public Builder(OzoneConfiguration conf) {
+      super(conf);
+    }
+
+    @Override
+    public MiniOzoneCluster build() throws IOException {
+      if (numOfActiveOMs > numOfOMs) {
+        throw new IllegalArgumentException("Number of active OMs cannot be " +
+            "more than the total number of OMs");
+      }
+
+      // If num of ActiveOMs is not set, set it to numOfOMs.
+      if (numOfActiveOMs == ACTIVE_OMS_NOT_SET) {
+        numOfActiveOMs = numOfOMs;
+      }
+
+      DefaultMetricsSystem.setMiniClusterMode(true);
+      initializeConfiguration();
+      initOMRatisConf();
+      StorageContainerManager scm;
+      ReconServer reconServer = null;
+      try {
+        scm = createSCM();
+        scm.start();
+        createOMService();
+        if (includeRecon) {
+          configureRecon();
+          reconServer = new ReconServer();
+          reconServer.execute(new String[] {});
+        }
+      } catch (AuthenticationException ex) {
+        throw new IOException("Unable to build MiniOzoneCluster. ", ex);
+      }
+
+      final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(
+          Collections.singletonList(scm), reconServer);
+
+      MiniOzoneClusterImpl cluster = new MiniOzoneOMHAClusterImpl(conf,
+          getActiveOMs(), getInactiveOMs(), scm, hddsDatanodes,
+          omServiceId, reconServer);
+
+      if (startDataNodes) {
+        cluster.startHddsDatanodes();
+      }
+      return cluster;
+    }
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index dd543ed..7cde08d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -58,21 +59,21 @@
       StorageContainerManager scm) throws Exception {
     performOperationOnKeyContainers((blockID) -> {
       if (scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
+          .getContainer(ContainerID.valueOf(blockID.getContainerID()))
           .getState() == HddsProtos.LifeCycleState.OPEN) {
         scm.getContainerManager()
-            .updateContainerState(ContainerID.valueof(blockID.getContainerID()),
+            .updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
                 HddsProtos.LifeCycleEvent.FINALIZE);
       }
       if (scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
+          .getContainer(ContainerID.valueOf(blockID.getContainerID()))
           .getState() == HddsProtos.LifeCycleState.CLOSING) {
         scm.getContainerManager()
-            .updateContainerState(ContainerID.valueof(blockID.getContainerID()),
+            .updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
                 HddsProtos.LifeCycleEvent.CLOSE);
       }
       Assert.assertFalse(scm.getContainerManager()
-          .getContainer(ContainerID.valueof(blockID.getContainerID()))
+          .getContainer(ContainerID.valueOf(blockID.getContainerID()))
           .isOpen());
     }, omKeyLocationInfoGroups);
   }
@@ -87,9 +88,10 @@
    */
   public static void closeAllContainers(EventPublisher eventPublisher,
       StorageContainerManager scm) {
-    for (ContainerID containerID :
-        scm.getContainerManager().getContainerIDs()) {
-      eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
+    for (ContainerInfo container :
+        scm.getContainerManager().getContainers()) {
+      eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER,
+          container.containerID());
     }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
similarity index 93%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
index f747651..7eeba4b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
@@ -39,11 +39,11 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 
 /**
- * This class tests MiniOzoneHAClusterImpl.
+ * This class tests MiniOzoneOMHAClusterImpl.
  */
-public class TestMiniOzoneHACluster {
+public class TestMiniOzoneOMHACluster {
 
-  private MiniOzoneHAClusterImpl cluster = null;
+  private MiniOzoneOMHAClusterImpl cluster = null;
   private OzoneConfiguration conf;
   private String clusterId;
   private String scmId;
@@ -71,7 +71,7 @@
     conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS,
         OZONE_ADMINISTRATORS_WILDCARD);
     conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+    cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOMServiceId(omServiceId)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 5a9c796..6301a5f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -54,6 +54,8 @@
     errorIfMissingXmlProps = true;
     xmlPropsToSkipCompare.add("hadoop.tags.custom");
     xmlPropsToSkipCompare.add("ozone.om.nodes.EXAMPLEOMSERVICEID");
+    xmlPropsToSkipCompare.add("ozone.scm.nodes.EXAMPLESCMSERVICEID");
+    xmlPrefixToSkipCompare.add("ipc.client.rpc-timeout.ms");
     xmlPropsToSkipCompare.add("ozone.om.leader.election.minimum.timeout" +
         ".duration"); // Deprecated config
     addPropertiesNotInXml();
@@ -67,6 +69,8 @@
         HddsConfigKeys.HDDS_SECURITY_PROVIDER,
         HddsConfigKeys.HDDS_X509_CRL_NAME, // HDDS-2873
         OMConfigKeys.OZONE_OM_NODES_KEY,
+        ScmConfigKeys.OZONE_SCM_NODES_KEY,
+        ScmConfigKeys.OZONE_SCM_ADDRESS_KEY,
         OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY,
         OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY,
         OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE,
@@ -82,7 +86,8 @@
         ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY,
         ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM,
         OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
-        OMConfigKeys.OZONE_OM_HA_PREFIX
+        OMConfigKeys.OZONE_OM_HA_PREFIX,
+        ScmConfigKeys.OZONE_SCM_HA_SECURITY_SUPPORTED
         // TODO HDDS-2856
     ));
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 746f9f9..d0ead1b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -36,12 +36,16 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
 import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
@@ -49,7 +53,6 @@
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.minikdc.MiniKdc;
@@ -88,6 +91,8 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY;
 import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.net.ServerSocketUtil.getPort;
@@ -98,6 +103,7 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED;
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
 import org.apache.ratis.protocol.ClientId;
@@ -168,6 +174,11 @@
           getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
       conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
           getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));
+      // use the same base ports as MiniOzoneHACluster
+      conf.setInt(OZONE_SCM_RATIS_PORT_KEY, getPort(1200, 100));
+      conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getPort(1201, 100));
+      conf.set(OZONE_OM_ADDRESS_KEY, "localhost:1202");
+
 
       DefaultMetricsSystem.setMiniClusterMode(true);
       final String path = folder.newFolder().toString();
@@ -265,7 +276,7 @@
   public void testSecureScmStartupSuccess() throws Exception {
 
     initSCM();
-    scm = StorageContainerManager.createSCM(conf);
+    scm = TestUtils.getScmSimple(conf);
     //Reads the SCM Info from SCM instance
     ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
     assertEquals(clusterId, scmInfo.getClusterId());
@@ -276,7 +287,7 @@
   public void testSCMSecurityProtocol() throws Exception {
 
     initSCM();
-    scm = HddsTestUtils.getScm(conf);
+    scm = TestUtils.getScmSimple(conf);
     //Reads the SCM Info from SCM instance
     try {
       scm.start();
@@ -291,7 +302,8 @@
       assertNotNull(scmSecurityProtocolClient);
       String caCert = scmSecurityProtocolClient.getCACertificate();
       assertNotNull(caCert);
-      LambdaTestUtils.intercept(RemoteException.class, "Certificate not found",
+      LambdaTestUtils.intercept(SCMSecurityException.class,
+          "Certificate not found",
           () -> scmSecurityProtocolClient.getCertificate("1"));
 
       // Case 2: User without Kerberos credentials should fail.
@@ -326,6 +338,10 @@
     scmStore.setScmId(scmId);
     // writes the version file properties
     scmStore.initialize();
+    if (SCMHAUtils.isSCMHAEnabled(conf)) {
+      SCMRatisServerImpl.initialize(clusterId, scmId,
+          SCMHANodeDetails.loadSCMHAConfig(conf).getLocalNodeDetails(), conf);
+    }
   }
 
   @Test
@@ -336,7 +352,7 @@
 
     LambdaTestUtils.intercept(IOException.class,
         "Running in secure mode, but config doesn't have a keytab",
-        () -> StorageContainerManager.createSCM(conf));
+        () -> TestUtils.getScmSimple(conf));
 
     conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
         "scm/_HOST@EXAMPLE.com");
@@ -344,7 +360,7 @@
         "/etc/security/keytabs/scm.keytab");
 
     testCommonKerberosFailures(
-        () -> StorageContainerManager.createSCM(conf));
+        () -> TestUtils.getScmSimple(conf));
 
   }
 
@@ -373,7 +389,7 @@
   public void testSecureOMInitializationFailure() throws Exception {
     initSCM();
     // Create a secure SCM instance as om client will connect to it
-    scm = StorageContainerManager.createSCM(conf);
+    scm = TestUtils.getScmSimple(conf);
     setupOm(conf);
     conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY,
         "non-existent-user@EXAMPLE.com");
@@ -387,7 +403,7 @@
   public void testSecureOmInitializationSuccess() throws Exception {
     initSCM();
     // Create a secure SCM instance as om client will connect to it
-    scm = StorageContainerManager.createSCM(conf);
+    scm = TestUtils.getScmSimple(conf);
     LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger());
     GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO);
 
@@ -405,7 +421,7 @@
   public void testAccessControlExceptionOnClient() throws Exception {
     initSCM();
     // Create a secure SCM instance as om client will connect to it
-    scm = StorageContainerManager.createSCM(conf);
+    scm = TestUtils.getScmSimple(conf);
     LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger());
     GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO);
     setupOm(conf);
@@ -625,7 +641,7 @@
 
     initSCM();
     try {
-      scm = HddsTestUtils.getScm(conf);
+      scm = TestUtils.getScmSimple(conf);
       scm.start();
       conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false);
       OMStorage omStore = new OMStorage(conf);
@@ -671,7 +687,7 @@
     omLogs.clearOutput();
     initSCM();
     try {
-      scm = HddsTestUtils.getScm(conf);
+      scm = TestUtils.getScmSimple(conf);
       scm.start();
 
       OMStorage omStore = new OMStorage(conf);
@@ -698,6 +714,9 @@
       if (scm != null) {
         scm.stop();
       }
+      if (om != null) {
+        om.stop();
+      }
       IOUtils.closeQuietly(om);
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 60e3ef0..488d8b0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -17,6 +17,45 @@
  */
 package org.apache.hadoop.ozone;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
+    .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
+import static org.junit.Assert.fail;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Stream;
+import java.util.Arrays;
+
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.commons.io.FileUtils;
@@ -41,6 +80,7 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.ha.*;
 import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
@@ -63,6 +103,9 @@
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.ratis.conf.RaftProperties;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.server.RaftServerConfigKeys;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -76,31 +119,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.Map;
-import java.util.List;
-import java.util.Set;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 
 /**
  * Test class that exercises the StorageContainerManager.
@@ -175,8 +193,7 @@
         } else {
           // If passes permission check, it should fail with
           // container not exist exception.
-          Assert.assertTrue(e.getMessage()
-              .contains("container doesn't exist"));
+          Assert.assertTrue(e instanceof ContainerNotFoundException);
         }
       }
 
@@ -274,8 +291,10 @@
             cluster.getStorageContainerManager());
       }
 
-      Map<Long, List<Long>> containerBlocks = createDeleteTXLog(delLog,
-          keyLocations, helper);
+      Map<Long, List<Long>> containerBlocks = createDeleteTXLog(
+          cluster.getStorageContainerManager(),
+          delLog, keyLocations, helper);
+
       // Verify a few TX gets created in the TX log.
       Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
 
@@ -286,6 +305,10 @@
       // empty again.
       GenericTestUtils.waitFor(() -> {
         try {
+          if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
+            cluster.getStorageContainerManager().getScmHAManager()
+                .asSCMHADBTransactionBuffer().flush();
+          }
           return delLog.getNumOfValidTransactions() == 0;
         } catch (IOException e) {
           return false;
@@ -296,10 +319,13 @@
       // but unknown block IDs.
       for (Long containerID : containerBlocks.keySet()) {
         // Add 2 TXs per container.
-        delLog.addTransaction(containerID,
-            Collections.singletonList(RandomUtils.nextLong()));
-        delLog.addTransaction(containerID,
-            Collections.singletonList(RandomUtils.nextLong()));
+        Map<Long, List<Long>> deletedBlocks = new HashMap<>();
+        List<Long> blocks = new ArrayList<>();
+        blocks.add(RandomUtils.nextLong());
+        blocks.add(RandomUtils.nextLong());
+        deletedBlocks.put(containerID, blocks);
+        addTransactions(cluster.getStorageContainerManager(), delLog,
+            deletedBlocks);
       }
 
       // Verify a few TX gets created in the TX log.
@@ -309,11 +335,15 @@
       // eventually these TX will success.
       GenericTestUtils.waitFor(() -> {
         try {
+          if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
+            cluster.getStorageContainerManager().getScmHAManager()
+                .asSCMHADBTransactionBuffer().flush();
+          }
           return delLog.getFailedTransactions().size() == 0;
         } catch (IOException e) {
           return false;
         }
-      }, 1000, 10000);
+      }, 1000, 20000);
     } finally {
       cluster.shutdown();
     }
@@ -364,7 +394,8 @@
             cluster.getStorageContainerManager());
       }
 
-      createDeleteTXLog(delLog, keyLocations, helper);
+      createDeleteTXLog(cluster.getStorageContainerManager(),
+          delLog, keyLocations, helper);
       // Verify a few TX gets created in the TX log.
       Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
 
@@ -390,7 +421,9 @@
     }
   }
 
-  private Map<Long, List<Long>> createDeleteTXLog(DeletedBlockLog delLog,
+  private Map<Long, List<Long>> createDeleteTXLog(
+      StorageContainerManager scm,
+      DeletedBlockLog delLog,
       Map<String, OmKeyInfo> keyLocations,
       TestStorageContainerManagerHelper helper) throws IOException {
     // These keys will be written into a bunch of containers,
@@ -428,9 +461,7 @@
         }
       });
     }
-    for (Map.Entry<Long, List<Long>> tx : containerBlocks.entrySet()) {
-      delLog.addTransaction(tx.getKey(), tx.getValue());
-    }
+    addTransactions(scm, delLog, containerBlocks);
 
     return containerBlocks;
   }
@@ -443,15 +474,32 @@
     Path scmPath = Paths.get(path, "scm-meta");
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
 
+    UUID clusterId = UUID.randomUUID();
+    String testClusterId = clusterId.toString();
     // This will initialize SCM
-    StorageContainerManager.scmInit(conf, "testClusterId");
+    StorageContainerManager.scmInit(conf, testClusterId);
 
     SCMStorageConfig scmStore = new SCMStorageConfig(conf);
     Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
-    Assert.assertEquals("testClusterId", scmStore.getClusterID());
-    StorageContainerManager.scmInit(conf, "testClusterIdNew");
+    Assert.assertEquals(testClusterId, scmStore.getClusterID());
+    StorageContainerManager.scmInit(conf, testClusterId);
     Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
-    Assert.assertEquals("testClusterId", scmStore.getClusterID());
+    Assert.assertEquals(testClusterId, scmStore.getClusterID());
+  }
+
+  @Test
+  public void testSCMInitializationWithHAEnabled() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+    final String path = GenericTestUtils.getTempPath(
+        UUID.randomUUID().toString());
+    Path scmPath = Paths.get(path, "scm-meta");
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+
+    final UUID clusterId = UUID.randomUUID();
+    // This will initialize SCM
+    StorageContainerManager.scmInit(conf, clusterId.toString());
+    validateRatisGroupExists(conf, clusterId.toString());
   }
 
   @Test
@@ -466,11 +514,83 @@
         MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
     cluster.waitForClusterToBeReady();
     try {
+      final UUID clusterId = UUID.randomUUID();
       // This will initialize SCM
-      StorageContainerManager.scmInit(conf, "testClusterId");
+      StorageContainerManager.scmInit(conf, clusterId.toString());
       SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-      Assert.assertEquals(NodeType.SCM, scmStore.getNodeType());
-      Assert.assertNotEquals("testClusterId", scmStore.getClusterID());
+      Assert.assertNotEquals(clusterId.toString(), scmStore.getClusterID());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  @VisibleForTesting
+  public static void validateRatisGroupExists(OzoneConfiguration conf,
+      String clusterId) throws IOException {
+    final SCMHAConfiguration haConf = conf.getObject(SCMHAConfiguration.class);
+    final RaftProperties properties = RatisUtil.newRaftProperties(haConf, conf);
+    final RaftGroupId raftGroupId =
+        SCMRatisServerImpl.buildRaftGroupId(clusterId);
+    final AtomicBoolean found = new AtomicBoolean(false);
+    RaftServerConfigKeys.storageDir(properties).parallelStream().forEach(
+        (dir) -> Optional.ofNullable(dir.listFiles()).map(Arrays::stream)
+            .orElse(Stream.empty()).filter(File::isDirectory).forEach(sub -> {
+              try {
+                LOG.info("{}: found a subdirectory {}", raftGroupId, sub);
+                RaftGroupId groupId = null;
+                try {
+                  groupId = RaftGroupId.valueOf(UUID.fromString(sub.getName()));
+                } catch (Exception e) {
+                  LOG.info("{}: The directory {} is not a group directory;"
+                      + " ignoring it. ", raftGroupId, sub.getAbsolutePath());
+                }
+                if (groupId != null) {
+                  if (groupId.equals(raftGroupId)) {
+                    LOG.info(
+                        "{} : The directory {} found a group directory for "
+                            + "cluster {}", raftGroupId, sub.getAbsolutePath(),
+                        clusterId);
+                    found.set(true);
+                  }
+                }
+              } catch (Exception e) {
+                LOG.warn(
+                    raftGroupId + ": Failed to find the group directory "
+                        + sub.getAbsolutePath() + ".", e);
+              }
+            }));
+    if (!found.get()) {
+      throw new IOException(
+          "Could not find any ratis group with id " + raftGroupId);
+    }
+  }
+  @Test
+  public void testSCMReinitializationWithHAEnabled() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
+    final String path = GenericTestUtils.getTempPath(
+        UUID.randomUUID().toString());
+    Path scmPath = Paths.get(path, "scm-meta");
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+    //This will set the cluster id in the version file
+    MiniOzoneCluster cluster =
+        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
+    cluster.waitForClusterToBeReady();
+    try {
+      final String clusterId =
+          cluster.getStorageContainerManager().getClusterId();
+      // validate there is no ratis group pre existing
+      try {
+        validateRatisGroupExists(conf, clusterId);
+        Assert.fail();
+      } catch (IOException ioe) {
+        // Exception is expected here
+      }
+      conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
+      // This will re-initialize SCM
+      StorageContainerManager.scmInit(conf, clusterId);
+      // Ratis group with cluster id exists now
+      validateRatisGroupExists(conf, clusterId);
     } finally {
       cluster.shutdown();
     }
@@ -487,7 +607,7 @@
     exception.expect(SCMException.class);
     exception.expectMessage(
         "SCM not initialized due to storage config failure");
-    StorageContainerManager.createSCM(conf);
+    TestUtils.getScmSimple(conf);
   }
 
   @Test
@@ -505,7 +625,7 @@
       scmStore.setScmId(scmId);
       // writes the version file properties
       scmStore.initialize();
-      StorageContainerManager scm = StorageContainerManager.createSCM(conf);
+      StorageContainerManager scm = TestUtils.getScmSimple(conf);
       //Reads the SCM Info from SCM instance
       ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
       Assert.assertEquals(clusterId, scmInfo.getClusterId());
@@ -638,10 +758,16 @@
           dnUuid, closeContainerCommand);
 
       GenericTestUtils.waitFor(() -> {
-        return replicationManager.isRunning();
+        SCMContext scmContext
+            = cluster.getStorageContainerManager().getScmContext();
+        return !scmContext.isInSafeMode() && scmContext.isLeader();
       }, 1000, 25000);
 
+      // After safe mode is off, ReplicationManager starts to run with a delay.
+      Thread.sleep(5000);
       // Give ReplicationManager some time to process the containers.
+      cluster.getStorageContainerManager()
+          .getReplicationManager().processContainersNow();
       Thread.sleep(5000);
 
       verify(publisher).fireEvent(eq(SCMEvents.DATANODE_COMMAND), argThat(new
@@ -651,6 +777,16 @@
     }
   }
 
+  private void addTransactions(StorageContainerManager scm,
+      DeletedBlockLog delLog,
+      Map<Long, List<Long>> containerBlocksMap)
+      throws IOException {
+    delLog.addTransactions(containerBlocksMap);
+    if (SCMHAUtils.isSCMHAEnabled(scm.getConfiguration())) {
+      scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
+    }
+  }
+
   @SuppressWarnings("visibilitymodifier")
   static class CloseContainerCommandMatcher
       extends ArgumentMatcher<CommandForDatanode> {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
index 5910b2c..feaf633 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java
@@ -183,4 +183,9 @@
     return securityConfig.getProvider();
   }
 
+  @Override
+  public String getComponentName() {
+    return null;
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index 1bbc635..29bda36 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -161,7 +161,7 @@
     long containerID = omKeyLocationInfo.getContainerID();
     PipelineID pipelineID =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(new ContainerID(containerID)).getPipelineID();
+            .getContainer(ContainerID.valueOf(containerID)).getPipelineID();
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(pipelineID);
@@ -169,13 +169,13 @@
 
     HddsProtos.LifeCycleState containerState =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(new ContainerID(containerID)).getState();
+            .getContainer(ContainerID.valueOf(containerID)).getState();
     LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info(
         "Current Container State is {}",  containerState);
     if ((containerState != HddsProtos.LifeCycleState.CLOSING) &&
         (containerState != HddsProtos.LifeCycleState.CLOSED)) {
       cluster.getStorageContainerManager().getContainerManager()
-          .updateContainerState(new ContainerID(containerID),
+          .updateContainerState(ContainerID.valueOf(containerID),
               HddsProtos.LifeCycleEvent.FINALIZE);
     }
     // wait for container to move to OPEN state in SCM
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index f2d8b0d..fa083cb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -114,6 +114,7 @@
             .setHbInterval(200)
             .setCertificateClient(new CertificateClientTestImpl(conf))
             .build();
+    cluster.setWaitForClusterToBeReadyTimeout(300000);
     cluster.waitForClusterToBeReady();
     cluster.getOzoneManager().startSecretManager();
     //the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
index 58ef998..dfd2692 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -65,6 +66,7 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import org.junit.AfterClass;
@@ -117,6 +119,9 @@
         TimeUnit.SECONDS);
     conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000,
         TimeUnit.SECONDS);
+    conf.setTimeDuration(OZONE_SCM_PIPELINE_CREATION_INTERVAL, 1000,
+        TimeUnit.SECONDS);
+
     DatanodeRatisServerConfig ratisServerConfig =
         conf.getObject(DatanodeRatisServerConfig.class);
     ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000));
@@ -278,9 +283,17 @@
     client.getObjectStore().getVolume(volumeName).getBucket(bucketName).
             deleteKey("ratis");
     GenericTestUtils.waitFor(() -> {
-      return
-          dnStateMachine.getCommandDispatcher().getDeleteBlocksCommandHandler()
-              .getInvocationCount() >= 1;
+      try {
+        if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
+          cluster.getStorageContainerManager().getScmHAManager()
+              .asSCMHADBTransactionBuffer().flush();
+        }
+        return
+            dnStateMachine.getCommandDispatcher()
+                .getDeleteBlocksCommandHandler().getInvocationCount() >= 1;
+      } catch (IOException e) {
+        return false;
+      }
     }, 500, 100000);
     Assert.assertTrue(containerData.getDeleteTransactionId() > delTrxId);
     Assert.assertTrue(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index ebdc1e7..ac62bc0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -154,7 +154,7 @@
     long containerID = locationInfos.get(0).getContainerID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerID));
+            .getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 94b822a..a303dcd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -182,7 +182,7 @@
     long containerId = locationInfoList.get(0).getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
         .getContainerManager()
-        .getContainer(ContainerID.valueof(containerId));
+        .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -222,7 +222,7 @@
     BlockID blockId = locationInfoList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -286,7 +286,7 @@
     key.flush();
 
     Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds()
-        .contains(ContainerID.valueof(containerId)));
+        .contains(ContainerID.valueOf(containerId)));
     Assert.assertTrue(
         keyOutputStream.getExcludeList().getDatanodes().isEmpty());
     Assert.assertTrue(
@@ -335,7 +335,7 @@
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -398,7 +398,7 @@
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 516bb1f..e23f0d4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -184,7 +184,7 @@
     BlockID blockId = streamEntryList.get(0).getBlockID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index 8f85b14..1ae0e33 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -160,7 +160,7 @@
     long containerId = locationInfoList.get(1).getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
         .getContainerManager()
-        .getContainer(ContainerID.valueof(containerId));
+        .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
@@ -207,7 +207,7 @@
     long containerId = streamEntryList.get(0).getBlockID().getContainerID();
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerId));
+            .getContainer(ContainerID.valueOf(containerId));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index 49243ee..676c096 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -91,6 +91,7 @@
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static File testDir;
   private static OzoneConfiguration conf;
   private static final String TEST_KEY = "key1";
@@ -126,6 +127,7 @@
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(10)
         .setScmId(SCM_ID)
+        .setClusterId(CLUSTER_ID)
         .setBlockSize(BLOCK_SIZE)
         .setChunkSize(CHUNK_SIZE)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
@@ -145,7 +147,7 @@
     TestOzoneRpcClient.setStorageContainerLocationClient(
         storageContainerLocationClient);
     TestOzoneRpcClient.setStore(store);
-    TestOzoneRpcClient.setScmId(SCM_ID);
+    TestOzoneRpcClient.setClusterId(CLUSTER_ID);
 
     // create test key
     createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index c85c8d6..06dd9fa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -156,7 +156,7 @@
     Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
     ContainerInfo container =
         cluster.getStorageContainerManager().getContainerManager()
-            .getContainer(ContainerID.valueof(containerID));
+            .getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline =
         cluster.getStorageContainerManager().getPipelineManager()
             .getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index bf0cc83..3cda449 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -162,7 +162,7 @@
     Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
     ContainerInfo container =
             cluster.getStorageContainerManager().getContainerManager()
-                    .getContainer(ContainerID.valueof(containerID));
+                    .getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline =
             cluster.getStorageContainerManager().getPipelineManager()
                     .getPipeline(container.getPipelineID());
@@ -209,7 +209,7 @@
       containerID = entry.getBlockID().getContainerID();
       ContainerInfo container =
           cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
+              .getContainer(ContainerID.valueOf(containerID));
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 89b588f..581dc2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -164,6 +164,8 @@
       remoteGroupName, READ, ACCESS);
 
   private static String scmId = UUID.randomUUID().toString();
+  private static String clusterId = UUID.randomUUID().toString();
+
 
   /**
    * Create a MiniOzoneCluster for testing.
@@ -175,6 +177,7 @@
         .setNumDatanodes(3)
         .setTotalPipelineNumLimit(10)
         .setScmId(scmId)
+        .setClusterId(clusterId)
         .build();
     cluster.waitForClusterToBeReady();
     ozClient = OzoneClientFactory.getRpcClient(conf);
@@ -228,8 +231,8 @@
     return TestOzoneRpcClientAbstract.store;
   }
 
-  public static void setScmId(String scmId) {
-    TestOzoneRpcClientAbstract.scmId = scmId;
+  public static void setClusterId(String clusterId) {
+    TestOzoneRpcClientAbstract.clusterId = clusterId;
   }
 
   /**
@@ -1395,7 +1398,7 @@
     // Second, sum the data size from chunks in Container via containerID
     // and localID, make sure the size equals to the size from keyDetails.
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -1596,7 +1599,7 @@
       String containreBaseDir =
           container.getContainerData().getVolume().getHddsRootDir().getPath();
       File chunksLocationPath = KeyValueContainerLocationUtil
-          .getChunksLocationPath(containreBaseDir, scmId, containerID);
+          .getChunksLocationPath(containreBaseDir, clusterId, containerID);
       byte[] corruptData = "corrupted data".getBytes(UTF_8);
       // Corrupt the contents of chunk files
       for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 610753a..91e187c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -171,7 +171,7 @@
         keyLocations.get(0).getLength());
 
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index f2e7b46..acbb9fa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -79,6 +79,7 @@
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static File testDir;
   private static OzoneConfiguration conf;
   private static OzoneBlockTokenSecretManager secretManager;
@@ -108,6 +109,7 @@
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(10)
         .setScmId(SCM_ID)
+        .setClusterId(CLUSTER_ID)
         .setCertificateClient(certificateClientTest)
         .build();
     String user = UserGroupInformation.getCurrentUser().getShortUserName();
@@ -131,7 +133,7 @@
     TestOzoneRpcClient.setStorageContainerLocationClient(
         storageContainerLocationClient);
     TestOzoneRpcClient.setStore(store);
-    TestOzoneRpcClient.setScmId(SCM_ID);
+    TestOzoneRpcClient.setClusterId(CLUSTER_ID);
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index 31acf4f..2c7c59c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -195,7 +195,7 @@
     for (long containerID : containerIdList) {
       ContainerInfo container =
           cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
+              .getContainer(ContainerID.valueOf(containerID));
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
@@ -227,8 +227,8 @@
       throws TimeoutException, InterruptedException, IOException {
     for (Pipeline pipeline1 : pipelineList) {
       // issue pipeline destroy command
-      cluster.getStorageContainerManager().getPipelineManager()
-          .finalizeAndDestroyPipeline(pipeline1, false);
+      cluster.getStorageContainerManager()
+          .getPipelineManager().closePipeline(pipeline1, false);
     }
 
     // wait for the pipeline to get destroyed in the datanodes
@@ -273,7 +273,7 @@
     for (long containerID : containerIdList) {
       ContainerInfo container =
           cluster.getStorageContainerManager().getContainerManager()
-              .getContainer(ContainerID.valueof(containerID));
+              .getContainer(ContainerID.valueOf(containerID));
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
@@ -294,7 +294,7 @@
         // send the order to close the container
         cluster.getStorageContainerManager().getEventQueue()
             .fireEvent(SCMEvents.CLOSE_CONTAINER,
-                ContainerID.valueof(containerID));
+                ContainerID.valueOf(containerID));
       }
     }
     int index = 0;
@@ -360,11 +360,11 @@
   }
 
   public static int countReplicas(long containerID, MiniOzoneCluster cluster) {
-    ContainerManager containerManager = cluster.getStorageContainerManager()
+    ContainerManagerV2 containerManager = cluster.getStorageContainerManager()
         .getContainerManager();
     try {
       Set<ContainerReplica> replicas = containerManager
-          .getContainerReplicas(ContainerID.valueof(containerID));
+          .getContainerReplicas(ContainerID.valueOf(containerID));
       LOG.info("Container {} has {} replicas on {}", containerID,
           replicas.size(),
           replicas.stream()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 3109595..1a54aa5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.junit.AfterClass;
@@ -123,7 +124,7 @@
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -144,9 +145,12 @@
             .getCloseContainerHandler();
     int lastInvocationCount = closeContainerHandler.getInvocationCount();
     //send the order to close the container
+    SCMCommand<?> command = new CloseContainerCommand(
+        containerID, pipeline.getId());
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
     cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID, pipeline.getId()));
+        .addDatanodeCommand(datanodeDetails.getUuid(), command);
     GenericTestUtils
         .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
             500, 5 * 1000);
@@ -180,7 +184,7 @@
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -192,9 +196,12 @@
 
     // Send the order to close the container, give random pipeline id so that
     // the container will not be closed via RATIS
+    SCMCommand<?> command = new CloseContainerCommand(
+        containerID, pipeline.getId());
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
     cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID, pipeline.getId()));
+        .addDatanodeCommand(datanodeDetails.getUuid(), command);
 
     //double check if it's really closed (waitFor also throws an exception)
     // TODO: change the below line after implementing QUASI_CLOSED to CLOSED
@@ -205,7 +212,7 @@
     Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
 
     cluster.getStorageContainerManager().getPipelineManager()
-        .finalizeAndDestroyPipeline(pipeline, false);
+        .closePipeline(pipeline, false);
     Thread.sleep(5000);
     // Pipeline close should not affect a container in CLOSED state
     Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
@@ -233,7 +240,7 @@
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -243,9 +250,12 @@
     for (DatanodeDetails details : datanodes) {
       Assert.assertFalse(isContainerClosed(cluster, containerID, details));
       //send the order to close the container
+      SCMCommand<?> command = new CloseContainerCommand(
+          containerID, pipeline.getId());
+      command.setTerm(cluster.getStorageContainerManager()
+          .getScmContext().getTermOfLeader());
       cluster.getStorageContainerManager().getScmNodeManager()
-          .addDatanodeCommand(details.getUuid(),
-              new CloseContainerCommand(containerID, pipeline.getId()));
+          .addDatanodeCommand(details.getUuid(), command);
       int index = cluster.getHddsDatanodeIndex(details);
       Container dnContainer = cluster.getHddsDatanodes().get(index)
           .getDatanodeStateMachine().getContainer().getContainerSet()
@@ -296,7 +306,7 @@
 
     long containerID = omKeyLocationInfo.getContainerID();
     ContainerInfo container = cluster.getStorageContainerManager()
-        .getContainerManager().getContainer(ContainerID.valueof(containerID));
+        .getContainerManager().getContainer(ContainerID.valueOf(containerID));
     Pipeline pipeline = cluster.getStorageContainerManager()
         .getPipelineManager().getPipeline(container.getPipelineID());
     List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -308,7 +318,7 @@
 
     // close the pipeline
     cluster.getStorageContainerManager()
-        .getPipelineManager().finalizeAndDestroyPipeline(pipeline, false);
+        .getPipelineManager().closePipeline(pipeline, false);
 
     // All the containers in OPEN or CLOSING state should transition to
     // QUASI-CLOSED after pipeline close
@@ -320,9 +330,12 @@
 
     // Send close container command from SCM to datanode with forced flag as
     // true
+    SCMCommand<?> command = new CloseContainerCommand(
+        containerID, pipeline.getId(), true);
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
     cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerID, pipeline.getId(), true));
+        .addDatanodeCommand(datanodeDetails.getUuid(), command);
     GenericTestUtils
         .waitFor(() -> isContainerClosed(
             cluster, containerID, datanodeDetails), 500, 5 * 1000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 731c6af..87e9f53 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
@@ -108,7 +109,7 @@
         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    ContainerID containerId = ContainerID.valueof(
+    ContainerID containerId = ContainerID.valueOf(
         omKeyLocationInfo.getContainerID());
     ContainerInfo container = cluster.getStorageContainerManager()
         .getContainerManager().getContainer(containerId);
@@ -120,9 +121,12 @@
     DatanodeDetails datanodeDetails =
         cluster.getHddsDatanodes().get(0).getDatanodeDetails();
     //send the order to close the container
+    SCMCommand<?> command = new CloseContainerCommand(
+        containerId.getId(), pipeline.getId());
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
     cluster.getStorageContainerManager().getScmNodeManager()
-        .addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerId.getId(), pipeline.getId()));
+        .addDatanodeCommand(datanodeDetails.getUuid(), command);
 
     GenericTestUtils.waitFor(() ->
             isContainerClosed(cluster, containerId.getId()),
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index fcaf006..a9131f8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -134,8 +135,11 @@
         cluster.getStorageContainerManager().getScmNodeManager();
 
     //send the order to close the container
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-            new CloseContainerCommand(containerId.getId(), pipeline.getId()));
+    SCMCommand<?> command = new CloseContainerCommand(
+        containerId.getId(), pipeline.getId());
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
+    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command);
 
     GenericTestUtils.waitFor(() ->
             isContainerClosed(hddsDatanodeService, containerId.getId()),
@@ -150,8 +154,10 @@
         containerId.getId()));
 
     // send delete container to the datanode
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-            new DeleteContainerCommand(containerId.getId(), false));
+    command = new DeleteContainerCommand(containerId.getId(), false);
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
+    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command);
 
     GenericTestUtils.waitFor(() ->
             isContainerDeleted(hddsDatanodeService, containerId.getId()),
@@ -185,8 +191,11 @@
         cluster.getStorageContainerManager().getScmNodeManager();
 
     // Send delete container command with force flag set to false.
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-        new DeleteContainerCommand(containerId.getId(), false));
+    SCMCommand<?> command = new DeleteContainerCommand(
+        containerId.getId(), false);
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
+    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command);
 
     // Here it should not delete it, and the container should exist in the
     // containerset
@@ -207,9 +216,10 @@
 
     // Now delete container with force flag set to true. now it should delete
     // container
-
-    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
-        new DeleteContainerCommand(containerId.getId(), true));
+    command = new DeleteContainerCommand(containerId.getId(), true);
+    command.setTerm(
+        cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
+    nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command);
 
     GenericTestUtils.waitFor(() ->
             isContainerDeleted(hddsDatanodeService, containerId.getId()),
@@ -254,7 +264,7 @@
         cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    return ContainerID.valueof(
+    return ContainerID.valueOf(
         omKeyLocationInfo.getContainerID());
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index 4911f95..011ce2d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -223,7 +223,7 @@
     }
 
     @Override
-    public void setScmId(String scmId) {
+    public void setClusterId(String scmId) {
 
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index fb56c8c..2a0d98f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -112,7 +112,7 @@
       }
       HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
           volumeSet, handlers, context, metrics, null);
-      dispatcher.setScmId(UUID.randomUUID().toString());
+      dispatcher.setClusterId(UUID.randomUUID().toString());
 
       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
       client = new XceiverClientGrpc(pipeline, conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index af67853..4c031d1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -217,7 +217,7 @@
       }
       HddsDispatcher dispatcher = new HddsDispatcher(
           conf, containerSet, volumeSet, handlers, context, metrics, null);
-      dispatcher.setScmId(scmId.toString());
+      dispatcher.setClusterId(scmId.toString());
       dispatcher.init();
 
       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher,
@@ -274,7 +274,7 @@
     }
 
     @Override
-    public void setScmId(String scmId) {
+    public void setClusterId(String scmId) {
 
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 2fdcf39..13faacc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -167,7 +167,7 @@
     HddsDispatcher hddsDispatcher = new HddsDispatcher(
         conf, containerSet, volumeSet, handlers, context, metrics,
         new BlockTokenVerifier(new SecurityConfig((conf)), caClient));
-    hddsDispatcher.setScmId(scmId.toString());
+    hddsDispatcher.setClusterId(scmId.toString());
     return hddsDispatcher;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/TestDatanodeLayoutUpgradeTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/TestDatanodeLayoutUpgradeTool.java
new file mode 100644
index 0000000..fa7a3c1
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/TestDatanodeLayoutUpgradeTool.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.dn;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.debug.DatanodeLayout;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.UUID;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
+
+/**
+ * Test Datanode Layout Upgrade Tool.
+ */
+public class TestDatanodeLayoutUpgradeTool {
+  private MiniOzoneCluster cluster = null;
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3).setTotalPipelineNumLimit(2).build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @After
+  public void destroy() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  private void writeData() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    Instant testStartTime = Instant.now();
+
+    ObjectStore store = cluster.getClient().getObjectStore();
+
+    String value = "sample value";
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    for (int i = 0; i < 10; i++) {
+      String keyName = UUID.randomUUID().toString();
+
+      OzoneOutputStream out = bucket.createKey(keyName,
+          value.getBytes(UTF_8).length, RATIS,
+          THREE, new HashMap<>());
+      out.write(value.getBytes(UTF_8));
+      out.close();
+      OzoneKey key = bucket.getKey(keyName);
+      Assert.assertEquals(keyName, key.getName());
+      OzoneInputStream is = bucket.readKey(keyName);
+      byte[] fileContent = new byte[value.getBytes(UTF_8).length];
+      is.read(fileContent);
+      Assert.assertEquals(value, new String(fileContent, UTF_8));
+      Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
+      Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
+    }
+
+    // wait for the container report to propagate to SCM
+    Thread.sleep(5000);
+  }
+
+  @Test
+  public void testDatanodeLayoutVerify() throws Exception {
+    writeData();
+    cluster.stop();
+
+    List<HddsDatanodeService> dns = cluster.getHddsDatanodes();
+    OzoneConfiguration c1 = dns.get(0).getConf();
+    Collection<String> paths = MutableVolumeSet.getDatanodeStorageDirs(c1);
+
+    for (String p : paths) {
+      // Verify that tool is able to verify the storage path
+      List<HddsVolume> volumes = DatanodeLayout.runUpgrade(c1, p, true);
+      Assert.assertEquals(0, volumes.size());
+
+      HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(p)
+          .conf(c1);
+      HddsVolume vol = volumeBuilder.build();
+
+      // Rename the path and verify that the tool fails
+      File clusterDir = new File(vol.getHddsRootDir(), vol.getClusterID());
+      File renamePath = new File(vol.getHddsRootDir(),
+          UUID.randomUUID().toString());
+      Assert.assertTrue(clusterDir.renameTo(renamePath));
+
+      List<HddsVolume> failedVols = DatanodeLayout.runUpgrade(c1, p, true);
+      Assert.assertEquals(1, failedVols.size());
+    }
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
index bd5926e..cadd51f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
@@ -181,10 +181,10 @@
     // wait for the incremental container report to propagate to SCM
     Thread.sleep(5000);
 
-    ContainerManager cm = cluster.getStorageContainerManager()
+    ContainerManagerV2 cm = cluster.getStorageContainerManager()
         .getContainerManager();
     Set<ContainerReplica> replicas = cm.getContainerReplicas(
-        ContainerID.valueof(c.getContainerData().getContainerID()));
+        ContainerID.valueOf(c.getContainerData().getContainerID()));
     Assert.assertEquals(1, replicas.size());
     ContainerReplica r = replicas.iterator().next();
     Assert.assertEquals(StorageContainerDatanodeProtocolProtos.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
index 79fa626..d4e9580 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
@@ -74,7 +74,6 @@
     ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
     ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
     conf.setFromObject(ratisServerConfig);
-
     RatisClientConfig.RaftConfig raftClientConfig =
         conf.getObject(RatisClientConfig.RaftConfig.class);
     raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
index 8e8109c..290eb5d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
@@ -130,6 +130,6 @@
     PipelineManager pipelineManager =
         cluster.getStorageContainerManager().getPipelineManager();
     Pipeline pipeline = pipelineManager.getPipeline(id);
-    pipelineManager.finalizeAndDestroyPipeline(pipeline, false);
+    pipelineManager.closePipeline(pipeline, false);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 6fdee0e..89c1cbe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -130,7 +130,7 @@
     ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
     Set<ContainerReplica> replicas =
         scm.getContainerManager().getContainerReplicas(
-            new ContainerID(keyInfo.getContainerID()));
+            ContainerID.valueOf(keyInfo.getContainerID()));
     Assert.assertTrue(replicas.size() == 1);
     replicas.stream().forEach(rp ->
         Assert.assertTrue(rp.getDatanodeDetails().getParent() != null));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 57e431f..493bf9a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -49,6 +49,8 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.net.NodeSchema;
@@ -170,6 +172,8 @@
     SCMConfigurator configurator = new SCMConfigurator();
     configurator.setScmNodeManager(nodeManager);
     configurator.setNetworkTopology(clusterMap);
+    configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));
+    configurator.setScmContext(SCMContext.emptyContext());
     scm = TestUtils.getScm(conf, configurator);
     scm.start();
     scm.exitSafeMode();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index 72bc510..e296cf5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -27,13 +27,13 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.util.ExitManager;
@@ -57,7 +57,7 @@
 @Timeout(500)
 public class TestOMRatisSnapshots {
 
-  private MiniOzoneHAClusterImpl cluster = null;
+  private MiniOzoneOMHAClusterImpl cluster = null;
   private ObjectStore objectStore;
   private OzoneConfiguration conf;
   private String clusterId;
@@ -88,7 +88,7 @@
     conf.setLong(
         OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY,
         SNAPSHOT_THRESHOLD);
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+    cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOMServiceId("om-service-test1")
@@ -135,9 +135,9 @@
     OzoneManagerRatisServer leaderRatisServer = leaderOM.getOmRatisServer();
 
     // Find the inactive OM
-    String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId();
+    String followerNodeId = leaderOM.getPeerNodes().get(0).getNodeId();
     if (cluster.isOMActive(followerNodeId)) {
-      followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId();
+      followerNodeId = leaderOM.getPeerNodes().get(1).getNodeId();
     }
     OzoneManager followerOM = cluster.getOzoneManager(followerNodeId);
 
@@ -145,11 +145,11 @@
     List<String> keys = writeKeysToIncreaseLogIndex(leaderRatisServer, 200);
 
     // Get the latest db checkpoint from the leader OM.
-    OMTransactionInfo omTransactionInfo =
-        OMTransactionInfo.readTransactionInfo(leaderOM.getMetadataManager());
+    TransactionInfo transactionInfo =
+        TransactionInfo.readTransactionInfo(leaderOM.getMetadataManager());
     TermIndex leaderOMTermIndex =
-        TermIndex.valueOf(omTransactionInfo.getTerm(),
-            omTransactionInfo.getTransactionIndex());
+        TermIndex.valueOf(transactionInfo.getTerm(),
+            transactionInfo.getTransactionIndex());
     long leaderOMSnaphsotIndex = leaderOMTermIndex.getIndex();
     long leaderOMSnapshotTermIndex = leaderOMTermIndex.getTerm();
 
@@ -200,9 +200,9 @@
     OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId);
 
     // Find the inactive OM and start it
-    String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId();
+    String followerNodeId = leaderOM.getPeerNodes().get(0).getNodeId();
     if (cluster.isOMActive(followerNodeId)) {
-      followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId();
+      followerNodeId = leaderOM.getPeerNodes().get(1).getNodeId();
     }
     cluster.startInactiveOM(followerNodeId);
 
@@ -255,9 +255,9 @@
     OzoneManagerRatisServer leaderRatisServer = leaderOM.getOmRatisServer();
 
     // Find the inactive OM
-    String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId();
+    String followerNodeId = leaderOM.getPeerNodes().get(0).getNodeId();
     if (cluster.isOMActive(followerNodeId)) {
-      followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId();
+      followerNodeId = leaderOM.getPeerNodes().get(1).getNodeId();
     }
     OzoneManager followerOM = cluster.getOzoneManager(followerNodeId);
 
@@ -267,7 +267,7 @@
     DBCheckpoint leaderDbCheckpoint = leaderOM.getMetadataManager().getStore()
         .getCheckpoint(false);
     Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation();
-    OMTransactionInfo leaderCheckpointTrxnInfo = OzoneManagerRatisUtils
+    TransactionInfo leaderCheckpointTrxnInfo = OzoneManagerRatisUtils
         .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation);
 
     // Corrupt the leader checkpoint and install that on the OM. The
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
index 6b9198c..c06c504 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
@@ -31,9 +31,9 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -197,14 +197,14 @@
     final String omNode3Id = "omNode3";
 
     String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-    String omNodesKey = OmUtils.addKeySuffixes(
+    String omNodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
 
     String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id);
     String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode2Id);
     String omNode3RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode3Id);
 
-    String omNode3RatisPortKey = OmUtils.addKeySuffixes(
+    String omNode3RatisPortKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNode3Id);
 
     conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
@@ -272,14 +272,14 @@
     final String node3Hostname = "node3.example.com";
 
     String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-    String omNodesKey = OmUtils.addKeySuffixes(
+    String omNodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
 
     String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id);
     String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode2Id);
     String omNode3RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode3Id);
 
-    String omNode3RatisPortKey = OmUtils.addKeySuffixes(
+    String omNode3RatisPortKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNode3Id);
 
     conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
@@ -350,7 +350,7 @@
     String omNode2Id = "omNode2";
     String omNode3Id = "omNode3";
     String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-    String omNodesKey = OmUtils.addKeySuffixes(
+    String omNodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
 
     String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id);
@@ -409,7 +409,7 @@
     String omNode2Id = "omNode2";
     String omNode3Id = "omNode3";
     String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id;
-    String omNodesKey = OmUtils.addKeySuffixes(
+    String omNodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
 
     conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
@@ -445,9 +445,9 @@
     // Set the node Ids for the 2 services. The nodeIds need to be
     // distinch within one service. The ids can overlap between
     // different services.
-    String om1NodesKey = OmUtils.addKeySuffixes(
+    String om1NodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, om1ServiceId);
-    String om2NodesKey = OmUtils.addKeySuffixes(
+    String om2NodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, om2ServiceId);
     conf.set(om1NodesKey, omNodesKeyValue);
     conf.set(om2NodesKey, omNodesKeyValue);
@@ -484,7 +484,7 @@
   }
 
   private String getOMAddrKeyWithSuffix(String serviceId, String nodeId) {
-    return OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    return ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
         serviceId, nodeId);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
index 92afc60..ec38936 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -68,7 +68,7 @@
  */
 public abstract class TestOzoneManagerHA {
 
-  private MiniOzoneHAClusterImpl cluster = null;
+  private MiniOzoneOMHAClusterImpl cluster = null;
   private ObjectStore objectStore;
   private OzoneConfiguration conf;
   private String clusterId;
@@ -88,7 +88,7 @@
   @Rule
   public Timeout timeout = Timeout.seconds(300);;
 
-  public MiniOzoneHAClusterImpl getCluster() {
+  public MiniOzoneOMHAClusterImpl getCluster() {
     return cluster;
   }
 
@@ -164,7 +164,7 @@
      */
     conf.set(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, "10s");
     conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2");
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+    cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOMServiceId(omServiceId)
@@ -189,7 +189,7 @@
    * Create a key in the bucket.
    * @return the key name.
    */
-  static String createKey(OzoneBucket ozoneBucket) throws IOException {
+  public static String createKey(OzoneBucket ozoneBucket) throws IOException {
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
     String data = "data" + RandomStringUtils.randomNumeric(5);
     OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java
index f6b9445..92adfd2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java
@@ -62,7 +62,7 @@
 import java.util.Iterator;
 import java.util.UUID;
 
-import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT;
+import static org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl.NODE_FAILURE_TIMEOUT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_DEFAULT;
 
 import static org.apache.ratis.metrics.RatisMetrics.RATIS_APPLICATION_NAME_METRICS;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java
index f340c92..c02ceca 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java
@@ -46,7 +46,7 @@
 import java.util.UUID;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT;
+import static org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl.NODE_FAILURE_TIMEOUT;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
@@ -75,7 +75,7 @@
   @Test
   public void testOneOMNodeDown() throws Exception {
     getCluster().stopOzoneManager(1);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
+    Thread.sleep(NODE_FAILURE_TIMEOUT * 4);
 
     createVolumeTest(true);
 
@@ -90,7 +90,7 @@
   public void testTwoOMNodesDown() throws Exception {
     getCluster().stopOzoneManager(1);
     getCluster().stopOzoneManager(2);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
+    Thread.sleep(NODE_FAILURE_TIMEOUT * 4);
 
     createVolumeTest(false);
 
@@ -272,7 +272,7 @@
     // Stop one of the ozone manager, to see when the OM leader changes
     // multipart upload is happening successfully or not.
     getCluster().stopOzoneManager(leaderOMNodeId);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
+    Thread.sleep(NODE_FAILURE_TIMEOUT * 4);
 
     createMultipartKeyAndReadKey(ozoneBucket, keyName, uploadID);
 
@@ -304,7 +304,7 @@
     String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
 
     getCluster().stopOzoneManager(leaderOMNodeId);
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
+    Thread.sleep(NODE_FAILURE_TIMEOUT * 4);
     createKeyTest(true); // failover should happen to new node
 
     long numTimesTriedToSameNode = omFailoverProxyProvider.getWaitTime()
@@ -505,9 +505,9 @@
 
     // Get follower OMs
     OzoneManager followerOM1 = getCluster().getOzoneManager(
-        leaderOM.getPeerNodes().get(0).getOMNodeId());
+        leaderOM.getPeerNodes().get(0).getNodeId());
     OzoneManager followerOM2 = getCluster().getOzoneManager(
-        leaderOM.getPeerNodes().get(1).getOMNodeId());
+        leaderOM.getPeerNodes().get(1).getNodeId());
 
     // Do some transactions so that the log index increases
     String userName = "user" + RandomStringUtils.randomNumeric(5);
@@ -605,7 +605,7 @@
 
     // Stop leader OM, and then validate list parts.
     stopLeaderOM();
-    Thread.sleep(NODE_FAILURE_TIMEOUT * 2);
+    Thread.sleep(NODE_FAILURE_TIMEOUT * 4);
 
     validateListParts(ozoneBucket, keyName, uploadID, partsMap);
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java
index c03af90..e9e987e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.parser;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
@@ -52,7 +52,7 @@
   @Rule
   public Timeout timeout = Timeout.seconds(300);
 
-  private MiniOzoneHAClusterImpl cluster = null;
+  private MiniOzoneOMHAClusterImpl cluster = null;
   private final ByteArrayOutputStream out = new ByteArrayOutputStream();
   private final ByteArrayOutputStream err = new ByteArrayOutputStream();
 
@@ -62,7 +62,7 @@
     String scmId = UUID.randomUUID().toString();
     String omServiceId = "omServiceId1";
     OzoneConfiguration conf = new OzoneConfiguration();
-    cluster =  (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+    cluster =  (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOMServiceId(omServiceId)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
index 92416c1..c35b45b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
@@ -32,7 +32,7 @@
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OmFailoverProxyUtil;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 
 import org.junit.After;
@@ -47,7 +47,7 @@
  */
 public class TestOzoneManagerSnapshotProvider {
 
-  private MiniOzoneHAClusterImpl cluster = null;
+  private MiniOzoneOMHAClusterImpl cluster = null;
   private ObjectStore objectStore;
   private OzoneConfiguration conf;
   private String clusterId;
@@ -69,7 +69,7 @@
     omServiceId = "om-service-test1";
     conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true);
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+    cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOMServiceId(omServiceId)
@@ -114,7 +114,7 @@
     OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId);
 
     // Get a follower OM
-    String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId();
+    String followerNodeId = leaderOM.getPeerNodes().get(0).getNodeId();
     OzoneManager followerOM = cluster.getOzoneManager(followerNodeId);
 
     // Download latest checkpoint from leader OM to follower OM
@@ -134,7 +134,7 @@
   private long getDownloadedSnapshotIndex(DBCheckpoint dbCheckpoint)
       throws Exception {
 
-    OMTransactionInfo trxnInfoFromCheckpoint =
+    TransactionInfo trxnInfoFromCheckpoint =
         OzoneManagerRatisUtils.getTrxnInfoFromCheckpoint(conf,
             dbCheckpoint.getCheckpointLocation());
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
index 1b2a915..d42606e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
@@ -17,24 +17,13 @@
 
 package org.apache.hadoop.ozone.recon;
 
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
-import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
 import java.util.Optional;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -55,6 +44,17 @@
 import org.junit.rules.Timeout;
 import org.slf4j.event.Level;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
+import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Recon's passive SCM integration tests.
  */
@@ -115,8 +115,8 @@
         "Trying to create pipeline in Recon, which is prohibited!",
         () -> reconPipelineManager.createPipeline(RATIS, ONE));
 
-    ContainerManager scmContainerManager = scm.getContainerManager();
-    assertTrue(scmContainerManager.getContainerIDs().isEmpty());
+    ContainerManagerV2 scmContainerManager = scm.getContainerManager();
+    assertTrue(scmContainerManager.getContainers().isEmpty());
 
     // Verify if all the 3 nodes are registered with Recon.
     NodeManager reconNodeManager = reconScm.getScmNodeManager();
@@ -125,7 +125,7 @@
         reconNodeManager.getAllNodes().size());
 
     // Create container
-    ContainerManager reconContainerManager = reconScm.getContainerManager();
+    ContainerManagerV2 reconContainerManager = reconScm.getContainerManager();
     ContainerInfo containerInfo =
         scmContainerManager.allocateContainer(RATIS, ONE, "test");
     long containerID = containerInfo.getContainerID();
@@ -155,10 +155,10 @@
     StorageContainerManager scm = cluster.getStorageContainerManager();
 
     // Stop Recon
-    ContainerManager scmContainerManager = scm.getContainerManager();
-    assertTrue(scmContainerManager.getContainerIDs().isEmpty());
-    ContainerManager reconContainerManager = reconScm.getContainerManager();
-    assertTrue(reconContainerManager.getContainerIDs().isEmpty());
+    ContainerManagerV2 scmContainerManager = scm.getContainerManager();
+    assertTrue(scmContainerManager.getContainers().isEmpty());
+    ContainerManagerV2 reconContainerManager = reconScm.getContainerManager();
+    assertTrue(reconContainerManager.getContainers().isEmpty());
 
     LambdaTestUtils.await(60000, 5000,
         () -> (reconScm.getScmNodeManager().getAllNodes().size() == 3));
@@ -174,7 +174,7 @@
         scmPipelineManager.getPipeline(containerInfo.getPipelineID());
     XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
     runTestOzoneContainerViaDataNode(containerID, client);
-    assertFalse(scmContainerManager.getContainerIDs().isEmpty());
+    assertFalse(scmContainerManager.getContainers().isEmpty());
 
     // Close a pipeline
     Optional<Pipeline> pipelineToClose = scmPipelineManager
@@ -183,7 +183,7 @@
         .filter(p -> !p.getId().equals(containerInfo.getPipelineID()))
         .findFirst();
     assertTrue(pipelineToClose.isPresent());
-    scmPipelineManager.finalizeAndDestroyPipeline(pipelineToClose.get(), false);
+    scmPipelineManager.closePipeline(pipelineToClose.get(), false);
 
     // Start Recon
     cluster.startRecon();
@@ -204,6 +204,6 @@
 
     LambdaTestUtils.await(90000, 5000,
         () -> (newReconScm.getContainerManager()
-            .exists(ContainerID.valueof(containerID))));
+            .containerExist(ContainerID.valueOf(containerID))));
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
index 14993f6..3e54a96 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
@@ -17,20 +17,13 @@
 
 package org.apache.hadoop.ozone.recon;
 
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
-import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode;
-import static org.junit.Assert.assertEquals;
-
 import java.time.Duration;
 import java.util.List;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -43,11 +36,18 @@
 import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.junit.rules.Timeout;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode;
+
 /**
  * Integration Tests for Recon's tasks.
  */
@@ -102,7 +102,7 @@
     LambdaTestUtils.await(60000, 5000,
         () -> (reconPipelineManager.getPipelines().size() >= 1));
 
-    ContainerManager scmContainerManager = scm.getContainerManager();
+    ContainerManagerV2 scmContainerManager = scm.getContainerManager();
     ReconContainerManager reconContainerManager =
         (ReconContainerManager) reconScm.getContainerManager();
     ContainerInfo containerInfo =
@@ -114,7 +114,7 @@
     runTestOzoneContainerViaDataNode(containerID, client);
 
     // Make sure Recon got the container report with new container.
-    assertEquals(scmContainerManager.getContainerIDs(),
+    Assert.assertEquals(scmContainerManager.getContainerIDs(),
         reconContainerManager.getContainerIDs());
 
     // Bring down the Datanode that had the container replica.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
index 417cd7c..a603c14 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java
@@ -31,7 +31,7 @@
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -49,13 +49,13 @@
 import org.junit.rules.Timeout;
 
 /**
- * This class sets up a MiniOzoneHACluster to test with Recon.
+ * This class sets up a MiniOzoneOMHACluster to test with Recon.
  */
 public class TestReconWithOzoneManagerHA {
   @Rule
   public Timeout timeout = Timeout.seconds(300);;
 
-  private MiniOzoneHAClusterImpl cluster;
+  private MiniOzoneOMHAClusterImpl cluster;
   private ObjectStore objectStore;
   private static final String OM_SERVICE_ID = "omService1";
   private static final String VOL_NAME = "testrecon";
@@ -70,7 +70,7 @@
     dbConf.setSyncOption(true);
     conf.setFromObject(dbConf);
 
-    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+    cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(UUID.randomUUID().toString())
         .setScmId(UUID.randomUUID().toString())
         .setOMServiceId(OM_SERVICE_ID)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
index fe05859..888422a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
@@ -105,7 +105,7 @@
     ContainerInfo container = scm.getContainerManager().getContainers().get(0);
     Pipeline pipeline = scm.getPipelineManager()
         .getPipeline(container.getPipelineID());
-    scm.getPipelineManager().finalizeAndDestroyPipeline(pipeline, false);
+    scm.getPipelineManager().closePipeline(pipeline, false);
     GenericTestUtils.waitFor(() ->
             container.getState() == HddsProtos.LifeCycleState.CLOSED,
         200, 30000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
index efe02f0..ecffb9e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
@@ -125,7 +125,7 @@
         .collect(Collectors.toList());
     Pipeline targetPipeline = pipelines.get(0);
     List<DatanodeDetails> nodes = targetPipeline.getNodes();
-    manager.finalizeAndDestroyPipeline(pipelines.get(0), true);
+    manager.closePipeline(pipelines.get(0), true);
 
     // kill datanode to trigger under-replicated container replication
     cluster.shutdownHddsDatanode(nodes.get(0));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
new file mode 100644
index 0000000..0659091
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
@@ -0,0 +1,358 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.scm;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.ha.SCMHAConfiguration;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl;
+import org.apache.hadoop.hdds.scm.ha.SCMStateMachine;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.HAUtils;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.ozone.util.ExitManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.ratis.server.protocol.TermIndex;
+
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Assert;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.Disabled;
+import org.slf4j.Logger;
+import org.slf4j.event.Level;
+
+/**
+ * Tests the Ratis snapshot feature in SCM.
+ */
+@Timeout(500)
+public class TestSCMInstallSnapshotWithHA {
+
+  private MiniOzoneHAClusterImpl cluster = null;
+  private OzoneConfiguration conf;
+  private String clusterId;
+  private String scmId;
+  private String omServiceId;
+  private String scmServiceId;
+  private int numOfOMs = 1;
+  private int numOfSCMs = 3;
+
+  private static final long SNAPSHOT_THRESHOLD = 5;
+ // private static final int LOG_PURGE_GAP = 5;
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   *
+   * @throws IOException
+   */
+  @BeforeEach
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omServiceId = "om-service-test1";
+    scmServiceId = "scm-service-test1";
+    SCMHAConfiguration scmhaConfiguration =
+        conf.getObject(SCMHAConfiguration.class);
+  //  scmhaConfiguration.setRaftLogPurgeEnabled(true);
+  //  scmhaConfiguration.setRaftLogPurgeGap(LOG_PURGE_GAP);
+    scmhaConfiguration.setRatisSnapshotThreshold(SNAPSHOT_THRESHOLD);
+    conf.setFromObject(scmhaConfiguration);
+
+    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOMServiceId(omServiceId)
+        .setSCMServiceId(scmServiceId)
+        .setNumOfOzoneManagers(numOfOMs)
+        .setNumOfStorageContainerManagers(numOfSCMs)
+        .setNumOfActiveSCMs(2)
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterEach
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * This test is disabled for now as there seems to be an issue with
+   * Ratis install Snapshot code. In ratis while a new node gets added,
+   * unless and until the node gets added to the voter list, the follower state
+   * is not updated with leader info. So, while an install snapshot notification
+   * is received in the leader, the leader info is not set and hence, out of
+   * ratis transfer using the same leader info doesn't work.
+   *
+   * TODO: Fix this
+   * */
+  @Test
+  @Disabled
+  public void testInstallSnapshot() throws Exception {
+    // Get the leader SCM
+    StorageContainerManager leaderSCM = getLeader(cluster);
+    String leaderNodeId = leaderSCM.getScmNodeDetails().getNodeId();
+    Assert.assertNotNull(leaderSCM);
+    // Find the inactive SCM
+    String followerId = getInactiveSCM(cluster).getScmId();
+
+    StorageContainerManager follower = cluster.getSCM(followerId);
+    // Do some transactions so that the log index increases
+    List<ContainerInfo> containers = writeToIncreaseLogIndex(leaderSCM, 200);
+
+    // Get the latest db checkpoint from the leader SCM.
+    TransactionInfo transactionInfo =
+        leaderSCM.getScmHAManager().asSCMHADBTransactionBuffer()
+            .getLatestTrxInfo();
+    TermIndex leaderTermIndex =
+        TermIndex.valueOf(transactionInfo.getTerm(),
+            transactionInfo.getTransactionIndex());
+    long leaderSnaphsotIndex = leaderTermIndex.getIndex();
+    long leaderSnapshotTermIndex = leaderTermIndex.getTerm();
+
+    DBCheckpoint leaderDbCheckpoint =
+        leaderSCM.getScmMetadataStore().getStore().getCheckpoint(false);
+
+    // Start the inactive
+    cluster.startInactiveSCM(followerId);
+
+    // The recently started  should be lagging behind the leader .
+    long followerLastAppliedIndex =
+        follower.getScmHAManager().getRatisServer().getSCMStateMachine()
+            .getLastAppliedTermIndex().getIndex();
+    assertTrue(
+        followerLastAppliedIndex < leaderSnaphsotIndex);
+
+    SCMHAManagerImpl scmhaManager =
+        (SCMHAManagerImpl) (follower.getScmHAManager());
+    // Install leader 's db checkpoint on the lagging .
+    scmhaManager.installCheckpoint(leaderNodeId, leaderDbCheckpoint);
+
+    SCMStateMachine followerStateMachine =
+        follower.getScmHAManager().getRatisServer().getSCMStateMachine();
+    // After the new checkpoint is installed, the follower
+    // lastAppliedIndex must >= the snapshot index of the checkpoint. It
+    // could be great than snapshot index if there is any conf entry from ratis.
+    followerLastAppliedIndex = followerStateMachine
+            .getLastAppliedTermIndex().getIndex();
+    assertTrue(followerLastAppliedIndex >= leaderSnaphsotIndex);
+    assertTrue(followerStateMachine
+        .getLastAppliedTermIndex().getTerm() >= leaderSnapshotTermIndex);
+
+    // Verify that the follower 's DB contains the transactions which were
+    // made while it was inactive.
+    SCMMetadataStore followerMetaStore = follower.getScmMetadataStore();
+    for (ContainerInfo containerInfo : containers) {
+      Assert.assertNotNull(followerMetaStore.getContainerTable()
+          .get(containerInfo.containerID()));
+    }
+  }
+
+  @Test
+  public void testInstallOldCheckpointFailure() throws Exception {
+    // Get the leader SCM
+    StorageContainerManager leaderSCM = getLeader(cluster);
+    String leaderNodeId = leaderSCM.getScmNodeDetails().getNodeId();
+    String followerId = getInactiveSCM(cluster).getScmId();
+    // Find the inactive SCM
+
+    StorageContainerManager follower = cluster.getSCM(followerId);
+    cluster.startInactiveSCM(followerId);
+    follower.exitSafeMode();
+    DBCheckpoint leaderDbCheckpoint = leaderSCM.getScmMetadataStore().getStore()
+        .getCheckpoint(false);
+
+    SCMStateMachine leaderSM =
+        leaderSCM.getScmHAManager().getRatisServer().getSCMStateMachine();
+    TermIndex lastTermIndex = leaderSM.getLastAppliedTermIndex();
+
+    SCMStateMachine followerSM =
+        follower.getScmHAManager().getRatisServer().getSCMStateMachine();
+    follower.getScmMetadataStore().getTransactionInfoTable().
+        put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.builder()
+        .setCurrentTerm(lastTermIndex.getTerm())
+            .setTransactionIndex(lastTermIndex.getIndex() + 100).build());
+    // Advance the follower
+    followerSM.notifyTermIndexUpdated(lastTermIndex.getTerm(),
+        lastTermIndex.getIndex() + 100);
+
+    GenericTestUtils.setLogLevel(SCMHAManagerImpl.getLogger(), Level.INFO);
+    GenericTestUtils.LogCapturer logCapture =
+        GenericTestUtils.LogCapturer.captureLogs(SCMHAManagerImpl.getLogger());
+
+    // Install the old checkpoint on the follower . This should fail as the
+    // follower is already ahead of that transactionLogIndex and the
+    // state should be reloaded.
+    TermIndex followerTermIndex = followerSM.getLastAppliedTermIndex();
+    SCMHAManagerImpl scmhaManager =
+        (SCMHAManagerImpl) (follower.getScmHAManager());
+    TermIndex newTermIndex =
+        scmhaManager.installCheckpoint(leaderNodeId, leaderDbCheckpoint);
+
+    String errorMsg = "Reloading old state of SCM";
+    Assert.assertTrue(logCapture.getOutput().contains(errorMsg));
+    Assert.assertNull(" installed checkpoint even though checkpoint " +
+        "logIndex is less than it's lastAppliedIndex", newTermIndex);
+    Assert.assertEquals(followerTermIndex,
+        followerSM.getLastAppliedTermIndex());
+    Assert.assertFalse(followerSM.getLifeCycleState().isPausingOrPaused());
+  }
+
+  @Test
+  public void testInstallCorruptedCheckpointFailure() throws Exception {
+    StorageContainerManager leaderSCM = getLeader(cluster);
+    String leaderNodeId = leaderSCM.getScmId();
+    // Find the inactive SCM
+    String followerId = getInactiveSCM(cluster).getScmId();
+    StorageContainerManager follower = cluster.getSCM(followerId);
+    // Do some transactions so that the log index increases
+    writeToIncreaseLogIndex(leaderSCM, 100);
+    File oldDBLocation =
+        follower.getScmMetadataStore().getStore().getDbLocation();
+
+    SCMStateMachine sm =
+        follower.getScmHAManager().getRatisServer().getSCMStateMachine();
+    TermIndex termIndex = sm.getLastAppliedTermIndex();
+    DBCheckpoint leaderDbCheckpoint = leaderSCM.getScmMetadataStore().getStore()
+        .getCheckpoint(false);
+    Path leaderCheckpointLocation = leaderDbCheckpoint.getCheckpointLocation();
+    TransactionInfo leaderCheckpointTrxnInfo = HAUtils
+        .getTrxnInfoFromCheckpoint(conf, leaderCheckpointLocation,
+            new SCMDBDefinition());
+
+    Assert.assertNotNull(leaderCheckpointLocation);
+    // Take a backup of the current DB
+    String dbBackupName =
+        "SCM_CHECKPOINT_BACKUP" + termIndex.getIndex() + "_" + System
+            .currentTimeMillis();
+    File dbDir = oldDBLocation.getParentFile();
+    File checkpointBackup = new File(dbDir, dbBackupName);
+
+    // Take a backup of the leader checkpoint
+    Files.copy(leaderCheckpointLocation.toAbsolutePath(),
+        checkpointBackup.toPath());
+    // Corrupt the leader checkpoint and install that on the follower. The
+    // operation should fail and  should shutdown.
+    boolean delete = true;
+    for (File file : leaderCheckpointLocation.toFile()
+        .listFiles()) {
+      if (file.getName().contains(".sst")) {
+        if (delete) {
+          file.delete();
+          delete = false;
+        } else {
+          delete = true;
+        }
+      }
+    }
+
+    SCMHAManagerImpl scmhaManager =
+        (SCMHAManagerImpl) (follower.getScmHAManager());
+    GenericTestUtils.setLogLevel(SCMHAManagerImpl.getLogger(), Level.ERROR);
+    GenericTestUtils.LogCapturer logCapture =
+        GenericTestUtils.LogCapturer.captureLogs(SCMHAManagerImpl.getLogger());
+    scmhaManager.setExitManagerForTesting(new DummyExitManager());
+
+    scmhaManager.installCheckpoint(leaderNodeId, leaderCheckpointLocation,
+        leaderCheckpointTrxnInfo);
+
+    Assert.assertTrue(logCapture.getOutput()
+        .contains("Failed to reload SCM state and instantiate services."));
+    Assert.assertTrue(sm.getLifeCycleState().isPausingOrPaused());
+
+    // Verify correct reloading
+    HAUtils
+        .replaceDBWithCheckpoint(leaderCheckpointTrxnInfo.getTransactionIndex(),
+            oldDBLocation, checkpointBackup.toPath(),
+            OzoneConsts.SCM_DB_BACKUP_PREFIX);
+    scmhaManager.startServices();
+    sm.unpause(leaderCheckpointTrxnInfo.getTerm(),
+        leaderCheckpointTrxnInfo.getTransactionIndex());
+    Assert.assertTrue(sm.getLastAppliedTermIndex()
+        .equals(leaderCheckpointTrxnInfo.getTermIndex()));
+  }
+
+  private List<ContainerInfo> writeToIncreaseLogIndex(
+      StorageContainerManager scm, long targetLogIndex)
+      throws IOException, InterruptedException {
+    List<ContainerInfo> containers = new ArrayList<>();
+    SCMStateMachine stateMachine =
+        scm.getScmHAManager().getRatisServer().getSCMStateMachine();
+    long logIndex = scm.getScmHAManager().getRatisServer().getSCMStateMachine()
+        .getLastAppliedTermIndex().getIndex();
+    while (logIndex < targetLogIndex) {
+      containers.add(scm.getContainerManager()
+          .allocateContainer(HddsProtos.ReplicationType.RATIS,
+              HddsProtos.ReplicationFactor.THREE,
+              TestSCMInstallSnapshotWithHA.class.getName()));
+      Thread.sleep(100);
+      logIndex = stateMachine.getLastAppliedTermIndex().getIndex();
+    }
+    return containers;
+  }
+
+  private static class DummyExitManager extends ExitManager {
+    @Override
+    public void exitSystem(int status, String message, Throwable throwable,
+        Logger log) {
+      log.error("System Exit: " + message, throwable);
+    }
+  }
+
+
+  static StorageContainerManager getLeader(MiniOzoneHAClusterImpl impl) {
+    for (StorageContainerManager scm : impl.getStorageContainerManagers()) {
+      if (scm.checkLeader()) {
+        return scm;
+      }
+    }
+    return null;
+  }
+
+  static StorageContainerManager getInactiveSCM(MiniOzoneHAClusterImpl impl) {
+    for (StorageContainerManager scm : impl.getStorageContainerManagers()) {
+      if (!impl.isSCMActive(scm.getScmId())) {
+        return scm;
+      }
+    }
+    return null;
+  }
+}
+
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index 01d6c27..e4d9797 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -146,7 +146,7 @@
     verifyEquals(data, containerStateCount);
 
     // Do some changes like allocate containers and change the container states
-    ContainerManager scmContainerManager = scm.getContainerManager();
+    ContainerManagerV2 scmContainerManager = scm.getContainerManager();
 
     List<ContainerInfo> containerInfoList = new ArrayList<>();
     for (int i=0; i < 10; i++) {
@@ -159,16 +159,18 @@
       if (i % 2 == 0) {
         containerID = containerInfoList.get(i).getContainerID();
         scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
-        assertEquals(scmContainerManager.getContainer(new ContainerID(
+            ContainerID.valueOf(containerID),
+            HddsProtos.LifeCycleEvent.FINALIZE);
+        assertEquals(scmContainerManager.getContainer(ContainerID.valueOf(
             containerID)).getState(), HddsProtos.LifeCycleState.CLOSING);
       } else {
         containerID = containerInfoList.get(i).getContainerID();
         scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
+            ContainerID.valueOf(containerID),
+            HddsProtos.LifeCycleEvent.FINALIZE);
         scmContainerManager.updateContainerState(
-            new ContainerID(containerID), HddsProtos.LifeCycleEvent.CLOSE);
-        assertEquals(scmContainerManager.getContainer(new ContainerID(
+            ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.CLOSE);
+        assertEquals(scmContainerManager.getContainer(ContainerID.valueOf(
             containerID)).getState(), HddsProtos.LifeCycleState.CLOSED);
       }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
new file mode 100644
index 0000000..591be84
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
@@ -0,0 +1,231 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.scm;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.Rule;
+
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+
+/**
+ * Base class for Ozone Manager HA tests.
+ */
+public class TestStorageContainerManagerHA {
+
+  private MiniOzoneHAClusterImpl cluster = null;
+  private OzoneConfiguration conf;
+  private String clusterId;
+  private String scmId;
+  private String omServiceId;
+  private static int numOfOMs = 3;
+  private String scmServiceId;
+  private static int numOfSCMs = 3;
+
+
+  @Rule
+  public Timeout timeout = new Timeout(300_000);
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   * @throws IOException
+   */
+  @Before
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omServiceId = "om-service-test1";
+    scmServiceId = "scm-service-test1";
+    cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOMServiceId(omServiceId)
+        .setSCMServiceId(scmServiceId)
+        .setNumOfStorageContainerManagers(numOfSCMs)
+        .setNumOfOzoneManagers(numOfOMs)
+        .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testAllSCMAreRunning() throws Exception {
+    int count = 0;
+    List<StorageContainerManager> scms = cluster.getStorageContainerManagers();
+    Assert.assertEquals(numOfSCMs, scms.size());
+    int peerSize = cluster.getStorageContainerManager().getScmHAManager()
+        .getRatisServer().getDivision().getGroup().getPeers().size();
+    for (StorageContainerManager scm : scms) {
+      if (scm.checkLeader()) {
+        count++;
+      }
+      Assert.assertTrue(peerSize == numOfSCMs);
+    }
+    Assert.assertEquals(1, count);
+    count = 0;
+    List<OzoneManager> oms = cluster.getOzoneManagersList();
+    Assert.assertEquals(numOfOMs, oms.size());
+    for (OzoneManager om : oms) {
+      if (om.isLeaderReady()) {
+        count++;
+      }
+    }
+    Assert.assertEquals(1, count);
+    testPutKey();
+  }
+
+  public void testPutKey() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    Instant testStartTime = Instant.now();
+    ObjectStore store =
+        OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
+    String value = "sample value";
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String keyName = UUID.randomUUID().toString();
+
+    OzoneOutputStream out = bucket
+        .createKey(keyName, value.getBytes(UTF_8).length, STAND_ALONE, ONE,
+            new HashMap<>());
+    out.write(value.getBytes(UTF_8));
+    out.close();
+    OzoneKey key = bucket.getKey(keyName);
+    Assert.assertEquals(keyName, key.getName());
+    OzoneInputStream is = bucket.readKey(keyName);
+    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
+    is.read(fileContent);
+    Assert.assertEquals(value, new String(fileContent, UTF_8));
+    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
+    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
+    is.close();
+    final OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
+        .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS)
+        .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName)
+        .setRefreshPipeline(true).build();
+    final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
+    final List<OmKeyLocationInfo> keyLocationInfos =
+        keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
+    long index = -1;
+    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
+      if (scm.checkLeader()) {
+        index = getLastAppliedIndex(scm);
+      }
+    }
+    Assert.assertFalse(index == -1);
+    long finalIndex = index;
+    // Ensure all follower scms have caught up with the leader
+    GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
+    final long containerID = keyLocationInfos.get(0).getContainerID();
+    for (int k = 0; k < numOfSCMs; k++) {
+      StorageContainerManager scm =
+          cluster.getStorageContainerManagers().get(k);
+      // flush to DB on each SCM
+      ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer())
+          .getStateMachine().takeSnapshot();
+      Assert.assertTrue(scm.getContainerManager()
+          .containerExist(ContainerID.valueOf(containerID)));
+      Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable()
+          .get(ContainerID.valueOf(containerID)));
+    }
+  }
+
+  private long getLastAppliedIndex(StorageContainerManager scm) {
+    return scm.getScmHAManager().getRatisServer().getDivision().getInfo()
+        .getLastAppliedIndex();
+  }
+
+  private boolean areAllScmInSync(long leaderIndex) {
+    List<StorageContainerManager> scms = cluster.getStorageContainerManagers();
+    boolean sync = false;
+    for (StorageContainerManager scm : scms) {
+      sync = getLastAppliedIndex(scm) == leaderIndex;
+    }
+    return sync;
+  }
+
+  @Test
+  public void testPrimordialSCM() throws Exception {
+    StorageContainerManager scm1 = cluster.getStorageContainerManagers().get(0);
+    StorageContainerManager scm2 = cluster.getStorageContainerManagers().get(1);
+    OzoneConfiguration conf1 = scm1.getConfiguration();
+    OzoneConfiguration conf2 = scm2.getConfiguration();
+    conf1.set(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY,
+        scm1.getSCMNodeId());
+    conf2.set(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY,
+        scm1.getSCMNodeId());
+    Assert.assertTrue(StorageContainerManager.scmBootstrap(conf1));
+    scm1.getScmHAManager().shutdown();
+    Assert.assertTrue(
+        StorageContainerManager.scmInit(conf1, scm1.getClusterId()));
+    Assert.assertTrue(StorageContainerManager.scmBootstrap(conf2));
+    Assert.assertTrue(
+        StorageContainerManager.scmInit(conf2, scm2.getClusterId()));
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 441fc03..da8728e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -93,7 +93,7 @@
     ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf),
+            HddsProtos.ReplicationFactor.ONE,
             OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
@@ -102,7 +102,7 @@
     ContainerWithPipeline container2 = storageContainerLocationClient
         .allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf),
+            HddsProtos.ReplicationFactor.THREE,
             OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
@@ -145,7 +145,7 @@
     ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            HddsProtos.ReplicationFactor.ONE,
+            HddsProtos.ReplicationFactor.THREE,
             OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
@@ -194,7 +194,7 @@
     ContainerWithPipeline container1 =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf),
+            HddsProtos.ReplicationFactor.ONE,
             OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
@@ -206,7 +206,7 @@
     ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
             SCMTestUtils.getReplicationType(conf),
-            SCMTestUtils.getReplicationFactor(conf),
+            HddsProtos.ReplicationFactor.THREE,
             OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
index c42f5a8..bcf9e47 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
@@ -92,7 +92,7 @@
   private OzoneBucket bucket;
   private MiniOzoneCluster cluster;
   private NodeManager nm;
-  private ContainerManager cm;
+  private ContainerManagerV2 cm;
   private PipelineManager pm;
   private StorageContainerManager scm;
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
index bb3da38..6f94c33 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
@@ -92,8 +92,7 @@
     try {
       cluster.getStorageContainerManager()
           .getPipelineManager()
-          .finalizeAndDestroyPipeline(
-              pipeline.get(), false);
+          .closePipeline(pipeline.get(), false);
     } catch (IOException e) {
       e.printStackTrace();
       Assert.fail();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index 4d86609..fac01fe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -36,10 +36,10 @@
 import org.apache.hadoop.fs.ozone.OzoneFsShell;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
-import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -131,7 +131,7 @@
     numOfOMs = 3;
     clusterId = UUID.randomUUID().toString();
     scmId = UUID.randomUUID().toString();
-    cluster = MiniOzoneCluster.newHABuilder(conf)
+    cluster = MiniOzoneCluster.newOMHABuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOMServiceId(omServiceId)
@@ -232,7 +232,7 @@
    * @return the leader OM's Node ID in the MiniOzoneHACluster.
    */
   private String getLeaderOMNodeId() {
-    MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
+    MiniOzoneOMHAClusterImpl haCluster = (MiniOzoneOMHAClusterImpl) cluster;
     OzoneManager omLeader = haCluster.getOMLeader();
     Assert.assertNotNull("There should be a leader OM at this point.",
         omLeader);
@@ -264,7 +264,7 @@
     res[indexOmServiceIds] = getSetConfStringFromConf(
         OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY);
 
-    String omNodesKey = OmUtils.addKeySuffixes(
+    String omNodesKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId);
     String omNodesVal = conf.get(omNodesKey);
     res[indexOmNodes] = generateSetConfString(omNodesKey, omNodesVal);
@@ -274,7 +274,7 @@
     assert(omNodesArr.length == numOfOMs);
     for (int i = 0; i < numOfOMs; i++) {
       res[indexOmAddressStart + i] =
-          getSetConfStringFromConf(OmUtils.addKeySuffixes(
+          getSetConfStringFromConf(ConfUtils.addKeySuffixes(
               OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodesArr[i]));
     }
 
@@ -362,7 +362,7 @@
 
     // Get leader OM node RPC address from ozone.om.address.omServiceId.omNode
     String omLeaderNodeId = getLeaderOMNodeId();
-    String omLeaderNodeAddrKey = OmUtils.addKeySuffixes(
+    String omLeaderNodeAddrKey = ConfUtils.addKeySuffixes(
         OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omLeaderNodeId);
     String omLeaderNodeAddr = conf.get(omLeaderNodeAddrKey);
     String omLeaderNodeAddrWithoutPort = omLeaderNodeAddr.split(":")[0];
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
new file mode 100644
index 0000000..16c664c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.shell;
+
+import java.net.InetSocketAddress;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.cli.OzoneAdmin;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This class tests ozone admin scm commands.
+ */
+public class TestScmAdminHA {
+  private static OzoneAdmin ozoneAdmin;
+  private static OzoneConfiguration conf;
+  private static String omServiceId;
+  private static int numOfOMs;
+  private static String clusterId;
+  private static String scmId;
+  private static MiniOzoneCluster cluster;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    ozoneAdmin = new OzoneAdmin();
+    conf = new OzoneConfiguration();
+
+    // Init HA cluster
+    omServiceId = "om-service-test1";
+    numOfOMs = 3;
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    cluster = MiniOzoneCluster.newOMHABuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOMServiceId(omServiceId)
+        .setNumOfOzoneManagers(numOfOMs)
+        .build();
+    conf.setQuietMode(false);
+    // enable ratis for Scm.
+    conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testGetRatisRoles() {
+    InetSocketAddress address =
+        cluster.getStorageContainerManager().getClientRpcAddress();
+    String hostPort = address.getHostName() + ":" + address.getPort();
+    String[] args = {"--scm", hostPort, "scm", "roles"};
+    ozoneAdmin.execute(args);
+  }
+}
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 824a654..e0749c7 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -23,6 +23,7 @@
 import java.util.Set;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.DBStoreHAManager;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -35,7 +36,7 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.storage.proto.
     OzoneManagerStorageProtos.PersistedUserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -47,7 +48,7 @@
 /**
  * OM metadata manager interface.
  */
-public interface OMMetadataManager {
+public interface OMMetadataManager extends DBStoreHAManager {
   /**
    * Start metadata manager.
    *
@@ -343,7 +344,7 @@
    */
   Table<String, S3SecretValue> getS3SecretTable();
 
-  Table<String, OMTransactionInfo> getTransactionInfoTable();
+  Table<String, TransactionInfo> getTransactionInfoTable();
 
   /**
    * Returns number of rows in a table.  This should not be used for very
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOMTransactionInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestTransactionInfoCodec.java
similarity index 78%
rename from hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOMTransactionInfoCodec.java
rename to hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestTransactionInfoCodec.java
index a8ffd00..7f33cb4 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOMTransactionInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestTransactionInfoCodec.java
@@ -17,7 +17,8 @@
 
 package org.apache.hadoop.ozone.om.codec;
 
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -30,29 +31,29 @@
 import static org.junit.Assert.fail;
 
 /**
- * Class to test {@link OMTransactionInfoCodec}.
+ * Class to test {@link TransactionInfoCodec}.
  */
-public class TestOMTransactionInfoCodec {
+public class TestTransactionInfoCodec {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
 
-  private OMTransactionInfoCodec codec;
+  private TransactionInfoCodec codec;
 
   @Before
   public void setUp() {
-    codec = new OMTransactionInfoCodec();
+    codec = new TransactionInfoCodec();
   }
   @Test
   public void toAndFromPersistedFormat() throws Exception {
-    OMTransactionInfo omTransactionInfo =
-        new OMTransactionInfo.Builder().setTransactionIndex(100)
+    TransactionInfo transactionInfo =
+        new TransactionInfo.Builder().setTransactionIndex(100)
             .setCurrentTerm(11).build();
 
-    OMTransactionInfo convertedTransactionInfo =
-        codec.fromPersistedFormat(codec.toPersistedFormat(omTransactionInfo));
+    TransactionInfo convertedTransactionInfo =
+        codec.fromPersistedFormat(codec.toPersistedFormat(transactionInfo));
 
-    Assert.assertEquals(omTransactionInfo, convertedTransactionInfo);
+    Assert.assertEquals(transactionInfo, convertedTransactionInfo);
 
   }
   @Test
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 1b65bca..7d6a43b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -47,7 +47,7 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.om.codec.OMTransactionInfoCodec;
+import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
@@ -70,7 +70,7 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.storage.proto
     .OzoneManagerStorageProtos.PersistedUserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -272,7 +272,7 @@
 
       // Check if there is a DB Inconsistent Marker in the metaDir. This
       // marker indicates that the DB is in an inconsistent state and hence
-      // the OM process should be terminated.
+      // the SCM process should be terminated.
       File markerFile = new File(metaDir, DB_TRANSIENT_MARKER);
       if (markerFile.exists()) {
         LOG.error("File {} marks that OM DB is in an inconsistent state.",
@@ -346,7 +346,7 @@
         .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
         .addCodec(S3SecretValue.class, new S3SecretValueCodec())
         .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec())
-        .addCodec(OMTransactionInfo.class, new OMTransactionInfoCodec());
+        .addCodec(TransactionInfo.class, new TransactionInfoCodec());
   }
 
   /**
@@ -401,7 +401,7 @@
     checkTableStatus(prefixTable, PREFIX_TABLE);
 
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
-        String.class, OMTransactionInfo.class);
+        String.class, TransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
   }
 
@@ -1133,7 +1133,7 @@
   }
 
   @Override
-  public Table<String, OMTransactionInfo> getTransactionInfoTable() {
+  public Table<String, TransactionInfo> getTransactionInfoTable() {
     return transactionInfoTable;
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index fffcf80..abc0283 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -69,10 +69,8 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
@@ -81,9 +79,9 @@
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.server.http.RatisDropwizardExports;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.HAUtils;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
-import org.apache.hadoop.hdds.utils.RetriableTask;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
@@ -91,13 +89,10 @@
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -138,8 +133,8 @@
 import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.ozone.common.ha.ratis.RatisSnapshotInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
@@ -199,7 +194,6 @@
 import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
 import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
-import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
 import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
@@ -312,7 +306,7 @@
   private OMNodeDetails omNodeDetails;
   private List<OMNodeDetails> peerNodes;
   private File omRatisSnapshotDir;
-  private final OMRatisSnapshotInfo omRatisSnapshotInfo;
+  private final RatisSnapshotInfo omRatisSnapshotInfo;
   private final Map<String, RatisDropwizardExports> ratisMetricsMap =
       new ConcurrentHashMap<>();
 
@@ -449,7 +443,7 @@
     // Create special volume s3v which is required for S3G.
     addS3GVolumeToDB();
 
-    this.omRatisSnapshotInfo = new OMRatisSnapshotInfo();
+    this.omRatisSnapshotInfo = new RatisSnapshotInfo();
 
     if (isRatisEnabled) {
       // Create Ratis storage dir
@@ -708,7 +702,7 @@
         .setService(omRpcAddressTxt)
         .setS3SecretManager(s3SecretManager)
         .setCertificateClient(certClient)
-        .setOmServiceId(omNodeDetails.getOMServiceId())
+        .setOmServiceId(omNodeDetails.getServiceId())
         .build();
   }
 
@@ -844,21 +838,7 @@
    */
   private static ScmBlockLocationProtocol getScmBlockClient(
       OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmBlockAddress =
-        getScmAddressForBlockClients(conf);
-    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
-        new ScmBlockLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
-                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return TracingUtil
-        .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
-            conf);
+    return HAUtils.getScmBlockClient(conf);
   }
 
   /**
@@ -868,22 +848,13 @@
    * @throws IOException
    */
   private static StorageContainerLocationProtocol getScmContainerClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        conf);
+      OzoneConfiguration conf) {
+    SCMContainerLocationFailoverProxyProvider proxyProvider =
+        new SCMContainerLocationFailoverProxyProvider(conf);
     StorageContainerLocationProtocol scmContainerClient =
         TracingUtil.createProxy(
             new StorageContainerLocationProtocolClientSideTranslatorPB(
-                RPC.getProxy(StorageContainerLocationProtocolPB.class,
-                    scmVersion,
-                    scmAddr, UserGroupInformation.getCurrentUser(), conf,
-                    NetUtils.getDefaultSocketFactory(conf),
-                    Client.getRpcTimeout(conf))),
-            StorageContainerLocationProtocol.class, conf);
+                proxyProvider), StorageContainerLocationProtocol.class, conf);
     return scmContainerClient;
   }
 
@@ -1048,18 +1019,7 @@
 
   private static ScmInfo getScmInfo(OzoneConfiguration conf)
       throws IOException {
-    try {
-      RetryPolicy retryPolicy = retryUpToMaximumCountWithFixedSleep(
-          10, 5, TimeUnit.SECONDS);
-      RetriableTask<ScmInfo> retriable = new RetriableTask<>(
-          retryPolicy, "OM#getScmInfo",
-          () -> getScmBlockClient(conf).getScmInfo());
-      return retriable.call();
-    } catch (IOException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new IOException("Failed to get SCM info", e);
-    }
+    return HAUtils.getScmInfo(conf);
   }
 
   /**
@@ -1352,27 +1312,27 @@
 
   @VisibleForTesting
   long getLastTrxnIndexForNonRatis() throws IOException {
-    OMTransactionInfo omTransactionInfo =
-        OMTransactionInfo.readTransactionInfo(metadataManager);
+    TransactionInfo transactionInfo =
+        TransactionInfo.readTransactionInfo(metadataManager);
     // If the OMTransactionInfo does not exist in DB or if the term is not -1
     // (corresponding to non-Ratis cluster), return 0 so that new incoming
     // requests can have transaction index starting from 1.
-    if (omTransactionInfo == null || omTransactionInfo.getTerm() != -1) {
+    if (transactionInfo == null || transactionInfo.getTerm() != -1) {
       return 0;
     }
     // If there exists a last transaction index in DB, the new incoming
     // requests in non-Ratis cluster must have transaction index
     // incrementally increasing from the stored transaction index onwards.
-    return omTransactionInfo.getTransactionIndex();
+    return transactionInfo.getTransactionIndex();
   }
 
-  public OMRatisSnapshotInfo getSnapshotInfo() {
+  public RatisSnapshotInfo getSnapshotInfo() {
     return omRatisSnapshotInfo;
   }
 
   @VisibleForTesting
   public long getRatisSnapshotIndex() throws IOException {
-    return OMTransactionInfo.readTransactionInfo(metadataManager)
+    return TransactionInfo.readTransactionInfo(metadataManager)
         .getTransactionIndex();
   }
 
@@ -1481,7 +1441,7 @@
 
     OMHANodeDetails haOMHANodeDetails = OMHANodeDetails.loadOMHAConfig(config);
     String serviceName =
-        haOMHANodeDetails.getLocalNodeDetails().getOMServiceId();
+        haOMHANodeDetails.getLocalNodeDetails().getServiceId();
     if (!StringUtils.isEmpty(serviceName)) {
       builder.addServiceName(serviceName);
     }
@@ -2682,7 +2642,7 @@
                 .build());
 
         OMRoleInfo peerOmRole = OMRoleInfo.newBuilder()
-            .setNodeId(peerNode.getOMNodeId())
+            .setNodeId(peerNode.getNodeId())
             .setServerRole(RaftPeerRole.FOLLOWER.name())
             .build();
         peerOmServiceInfoBuilder.setOmRoleInfo(peerOmRole);
@@ -3267,7 +3227,7 @@
       throws Exception {
 
     Path checkpointLocation = omDBCheckpoint.getCheckpointLocation();
-    OMTransactionInfo checkpointTrxnInfo = OzoneManagerRatisUtils
+    TransactionInfo checkpointTrxnInfo = OzoneManagerRatisUtils
         .getTrxnInfoFromCheckpoint(configuration, checkpointLocation);
 
     LOG.info("Installing checkpoint with OMTransactionInfo {}",
@@ -3277,7 +3237,7 @@
   }
 
   TermIndex installCheckpoint(String leaderId, Path checkpointLocation,
-      OMTransactionInfo checkpointTrxnInfo) throws Exception {
+      TransactionInfo checkpointTrxnInfo) throws Exception {
 
     File oldDBLocation = metadataManager.getStore().getDbLocation();
     try {
@@ -3494,11 +3454,11 @@
   }
 
   public String getOMNodeId() {
-    return omNodeDetails.getOMNodeId();
+    return omNodeDetails.getNodeId();
   }
 
   public String getOMServiceId() {
-    return omNodeDetails.getOMServiceId();
+    return omNodeDetails.getServiceId();
   }
 
   @VisibleForTesting
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 99dc3e6..e2afa5b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -31,6 +31,8 @@
 
 import java.io.IOException;
 
+import static org.apache.hadoop.hdds.utils.HAUtils.checkSecurityAndSCMHAEnabled;
+
 /**
  * This class provides a command line interface to start the OM
  * using Picocli.
@@ -122,6 +124,7 @@
     @Override
     public void start(OzoneConfiguration conf) throws IOException,
         AuthenticationException {
+      checkSecurityAndSCMHAEnabled(conf);
       OzoneManager om = OzoneManager.createOm(conf);
       om.start();
       om.join();
@@ -130,6 +133,7 @@
     @Override
     public boolean init(OzoneConfiguration conf) throws IOException,
         AuthenticationException {
+      checkSecurityAndSCMHAEnabled(conf);
       return OzoneManager.omInit(conf);
     }
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index b1c5096..6e30ca4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.codec;
 
+import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.hdds.utils.db.LongCodec;
@@ -33,7 +34,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
 
@@ -133,14 +134,14 @@
                     S3SecretValue.class,
                     new S3SecretValueCodec());
 
-  public static final DBColumnFamilyDefinition<String, OMTransactionInfo>
+  public static final DBColumnFamilyDefinition<String, TransactionInfo>
             TRANSACTION_INFO_TABLE =
             new DBColumnFamilyDefinition<>(
                     OmMetadataManagerImpl.TRANSACTION_INFO_TABLE,
                     String.class,
                     new StringCodec(),
-                    OMTransactionInfo.class,
-                    new OMTransactionInfoCodec());
+                    TransactionInfo.class,
+                    new TransactionInfoCodec());
 
   @Override
   public String getName() {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
index e74720a..dd44868 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.OzoneIllegalArgumentException;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -133,7 +134,7 @@
         } else {
           isPeer = false;
         }
-        String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+        String rpcAddrKey = ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
             serviceId, nodeId);
         String rpcAddrStr = OmUtils.getOmRpcAddress(conf, rpcAddrKey);
         if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
@@ -147,7 +148,7 @@
         // default
         isOMAddressSet = true;
 
-        String ratisPortKey = OmUtils.addKeySuffixes(OZONE_OM_RATIS_PORT_KEY,
+        String ratisPortKey = ConfUtils.addKeySuffixes(OZONE_OM_RATIS_PORT_KEY,
             serviceId, nodeId);
         int ratisPort = conf.getInt(ratisPortKey, OZONE_OM_RATIS_PORT_DEFAULT);
 
@@ -166,7 +167,7 @@
               rpcAddrStr);
         }
 
-        if (!addr.isUnresolved() && !isPeer && OmUtils.isAddressLocal(addr)) {
+        if (!addr.isUnresolved() && !isPeer && ConfUtils.isAddressLocal(addr)) {
           localRpcAddress = addr;
           localOMServiceId = serviceId;
           localOMNodeId = nodeId;
@@ -188,7 +189,8 @@
             localOMServiceId, localOMNodeId,
             NetUtils.getHostPortString(localRpcAddress), localRatisPort);
 
-        setOMNodeSpecificConfigs(conf, localOMServiceId, localOMNodeId);
+        ConfUtils.setNodeSpecificConfigs(genericConfigKeys, conf,
+            localOMServiceId, localOMNodeId, LOG);
         return new OMHANodeDetails(getHAOMNodeDetails(conf, localOMServiceId,
             localOMNodeId, localRpcAddress, localRatisPort), peerNodesList);
 
@@ -292,36 +294,6 @@
         .build();
   }
 
-  /**
-   * Check if any of the following configuration keys have been set using OM
-   * Node ID suffixed to the key. If yes, then set the base key with the
-   * configured valued.
-   *    1. {@link OMConfigKeys#OZONE_OM_HTTP_ADDRESS_KEY}
-   *    2. {@link OMConfigKeys#OZONE_OM_HTTPS_ADDRESS_KEY}
-   *    3. {@link OMConfigKeys#OZONE_OM_HTTP_BIND_HOST_KEY}
-   *    4. {@link OMConfigKeys#OZONE_OM_HTTPS_BIND_HOST_KEY}\
-   *    5. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE}
-   *    6. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY}
-   *    7. {@link OMConfigKeys#OZONE_OM_KERBEROS_KEYTAB_FILE_KEY}
-   *    8. {@link OMConfigKeys#OZONE_OM_KERBEROS_PRINCIPAL_KEY}
-   *    9. {@link OMConfigKeys#OZONE_OM_DB_DIRS}
-   *    10. {@link OMConfigKeys#OZONE_OM_ADDRESS_KEY}
-   */
-  private static void setOMNodeSpecificConfigs(
-      OzoneConfiguration ozoneConfiguration, String omServiceId,
-      String omNodeId) {
-
-    for (String confKey : genericConfigKeys) {
-      String confValue = OmUtils.getConfSuffixedWithOMNodeId(
-          ozoneConfiguration, confKey, omServiceId, omNodeId);
-      if (confValue != null) {
-        LOG.info("Setting configuration key {} with value of key {}: {}",
-            confKey, OmUtils.addKeySuffixes(confKey, omNodeId), confValue);
-        ozoneConfiguration.set(confKey, confValue);
-      }
-    }
-  }
-
   private static void throwConfException(String message, String... arguments)
       throws IllegalArgumentException {
     String exceptionMsg = String.format(message, arguments);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
index 3fe8735..29ddef0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java
@@ -20,9 +20,8 @@
 import org.apache.commons.lang3.StringUtils;
 
 import org.apache.hadoop.hdds.server.http.HttpConfig;
-import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.ha.NodeDetails;
 
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
@@ -31,14 +30,8 @@
 /**
  * This class stores OM node details.
  */
-public final class OMNodeDetails {
-  private String omServiceId;
-  private String omNodeId;
-  private InetSocketAddress rpcAddress;
+public final class OMNodeDetails extends NodeDetails {
   private int rpcPort;
-  private int ratisPort;
-  private String httpAddress;
-  private String httpsAddress;
 
   /**
    * Constructs OMNodeDetails object.
@@ -46,28 +39,27 @@
   private OMNodeDetails(String serviceId, String nodeId,
       InetSocketAddress rpcAddr, int rpcPort, int ratisPort,
       String httpAddress, String httpsAddress) {
-    this.omServiceId = serviceId;
-    this.omNodeId = nodeId;
-    this.rpcAddress = rpcAddr;
+    super(serviceId, nodeId, rpcAddr, ratisPort, httpAddress, httpsAddress);
     this.rpcPort = rpcPort;
-    this.ratisPort = ratisPort;
-    this.httpAddress = httpAddress;
-    this.httpsAddress = httpsAddress;
   }
 
   @Override
   public String toString() {
     return "OMNodeDetails["
-        + "omServiceId=" + omServiceId +
-        ", omNodeId=" + omNodeId +
-        ", rpcAddress=" + rpcAddress +
-        ", rpcPort=" + rpcPort +
-        ", ratisPort=" + ratisPort +
-        ", httpAddress=" + httpAddress +
-        ", httpsAddress=" + httpsAddress +
+        + "omServiceId=" + getServiceId() +
+        ", omNodeId=" + getNodeId() +
+        ", rpcAddress=" + getRpcAddressString() +
+        ", rpcPort=" + getRpcPort() +
+        ", ratisPort=" + getRatisPort() +
+        ", httpAddress=" + getHttpAddress() +
+        ", httpsAddress=" + getHttpsAddress() +
         "]";
   }
 
+  public int getRpcPort() {
+    return rpcPort;
+  }
+
   /**
    * Builder class for OMNodeDetails.
    */
@@ -117,60 +109,18 @@
     }
   }
 
-  public String getOMServiceId() {
-    return omServiceId;
-  }
-
-  public String getOMNodeId() {
-    return omNodeId;
-  }
-
-  public InetSocketAddress getRpcAddress() {
-    return rpcAddress;
-  }
-
-  public boolean isHostUnresolved() {
-    return rpcAddress.isUnresolved();
-  }
-
-  public InetAddress getInetAddress() {
-    return rpcAddress.getAddress();
-  }
-
-  public String getHostName() {
-    return rpcAddress.getHostName();
-  }
-
-  public String getRatisHostPortStr() {
-    StringBuilder hostPort = new StringBuilder();
-    hostPort.append(getHostName())
-        .append(":")
-        .append(ratisPort);
-    return hostPort.toString();
-  }
-
-  public int getRatisPort() {
-    return ratisPort;
-  }
-
-  public int getRpcPort() {
-    return rpcPort;
-  }
-
-  public String getRpcAddressString() {
-    return NetUtils.getHostPortString(rpcAddress);
-  }
-
   public String getOMDBCheckpointEnpointUrl(HttpConfig.Policy httpPolicy) {
     if (httpPolicy.isHttpEnabled()) {
-      if (StringUtils.isNotEmpty(httpAddress)) {
-        return "http://" + httpAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT
-            + "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true";
+      if (StringUtils.isNotEmpty(getHttpAddress())) {
+        return "http://" + getHttpAddress() +
+            OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT +
+            "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true";
       }
     } else {
-      if (StringUtils.isNotEmpty(httpsAddress)) {
-        return "https://" + httpsAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT
-            + "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true";
+      if (StringUtils.isNotEmpty(getHttpsAddress())) {
+        return "https://" + getHttpsAddress() +
+            OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT +
+            "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true";
       }
     }
     return null;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index dea0d5d..dc6d1a9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -37,6 +37,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.function.SupplierWithIOException;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.util.Time;
@@ -215,7 +216,7 @@
   }
 
   /**
-   * Add to writeBatch {@link OMTransactionInfo}.
+   * Add to writeBatch {@link TransactionInfo}.
    */
   private Void addToBatchTransactionInfoWithTrace(String parentName,
       long transactionIndex, SupplierWithIOException<Void> supplier)
@@ -277,7 +278,7 @@
                 (SupplierWithIOException<Void>) () -> {
                   omMetadataManager.getTransactionInfoTable().putWithBatch(
                       batchOperation, TRANSACTION_INFO_KEY,
-                      new OMTransactionInfo.Builder()
+                      new TransactionInfo.Builder()
                           .setTransactionIndex(lastRatisTransactionIndex)
                           .setCurrentTerm(term).build());
                   return null;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
index be9677d..28a16f9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
@@ -308,9 +308,9 @@
       throws IOException {
 
     // RaftGroupId is the omServiceId
-    String omServiceId = omNodeDetails.getOMServiceId();
+    String omServiceId = omNodeDetails.getServiceId();
 
-    String omNodeId = omNodeDetails.getOMNodeId();
+    String omNodeId = omNodeDetails.getNodeId();
     RaftPeerId localRaftPeerId = RaftPeerId.getRaftPeerId(omNodeId);
 
     InetSocketAddress ratisAddr = new InetSocketAddress(
@@ -326,7 +326,7 @@
     raftPeers.add(localRaftPeer);
 
     for (OMNodeDetails peerInfo : peerNodes) {
-      String peerNodeId = peerInfo.getOMNodeId();
+      String peerNodeId = peerInfo.getNodeId();
       RaftPeerId raftPeerId = RaftPeerId.valueOf(peerNodeId);
       RaftPeer raftPeer;
       if (peerInfo.isHostUnresolved()) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
index a81d5d4..21b770e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java
@@ -32,6 +32,8 @@
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.ozone.common.ha.ratis.RatisSnapshotInfo;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
@@ -80,7 +82,7 @@
   private RequestHandler handler;
   private RaftGroupId raftGroupId;
   private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer;
-  private final OMRatisSnapshotInfo snapshotInfo;
+  private final RatisSnapshotInfo snapshotInfo;
   private final ExecutorService executorService;
   private final ExecutorService installSnapshotExecutor;
   private final boolean isTracingEnabled;
@@ -505,15 +507,15 @@
     // This is done, as we have a check in Ratis for not throwing
     // LeaderNotReadyException, it checks stateMachineIndex >= raftLog
     // nextIndex (placeHolderIndex).
-    OMTransactionInfo omTransactionInfo =
-        OMTransactionInfo.readTransactionInfo(
+    TransactionInfo transactionInfo =
+        TransactionInfo.readTransactionInfo(
             ozoneManager.getMetadataManager());
-    if (omTransactionInfo != null) {
+    if (transactionInfo != null) {
       setLastAppliedTermIndex(TermIndex.valueOf(
-          omTransactionInfo.getTerm(),
-          omTransactionInfo.getTransactionIndex()));
-      snapshotInfo.updateTermIndex(omTransactionInfo.getTerm(),
-          omTransactionInfo.getTransactionIndex());
+          transactionInfo.getTerm(),
+          transactionInfo.getTransactionIndex()));
+      snapshotInfo.updateTermIndex(transactionInfo.getTerm(),
+          transactionInfo.getTransactionIndex());
     }
     LOG.info("LastAppliedIndex is set from TransactionInfo from OM DB as {}",
         getLastAppliedTermIndex());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 681c0da..14657c6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -19,12 +19,11 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.hdds.utils.HAUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest;
 import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest;
@@ -69,14 +68,11 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.ratis.util.FileUtils;
 import org.rocksdb.RocksDBException;
 
 import java.io.IOException;
 import java.nio.file.Path;
 
-import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE;
 
 /**
  * Utility class used by OzoneManager HA.
@@ -233,47 +229,10 @@
   /**
    * Obtain OMTransactionInfo from Checkpoint.
    */
-  public static OMTransactionInfo getTrxnInfoFromCheckpoint(
+  public static TransactionInfo getTrxnInfoFromCheckpoint(
       OzoneConfiguration conf, Path dbPath) throws Exception {
-
-    if (dbPath != null) {
-      Path dbDir = dbPath.getParent();
-      Path dbFile = dbPath.getFileName();
-      if (dbDir != null && dbFile != null) {
-        return getTransactionInfoFromDB(conf, dbDir, dbFile.toString());
-      }
-    }
-    
-    throw new IOException("Checkpoint " + dbPath + " does not have proper " +
-        "DB location");
-  }
-
-  /**
-   * Obtain Transaction info from DB.
-   * @param tempConfig
-   * @param dbDir path to DB
-   * @return OMTransactionInfo
-   * @throws Exception
-   */
-  private static OMTransactionInfo getTransactionInfoFromDB(
-      OzoneConfiguration tempConfig, Path dbDir, String dbName)
-      throws Exception {
-    DBStore dbStore = OmMetadataManagerImpl.loadDB(tempConfig, dbDir.toFile(),
-        dbName);
-
-    Table<String, OMTransactionInfo> transactionInfoTable =
-        dbStore.getTable(TRANSACTION_INFO_TABLE,
-            String.class, OMTransactionInfo.class);
-
-    OMTransactionInfo omTransactionInfo =
-        transactionInfoTable.get(TRANSACTION_INFO_KEY);
-    dbStore.close();
-
-    if (omTransactionInfo == null) {
-      throw new IOException("Failed to read OMTransactionInfo from DB " +
-          dbName + " at " + dbDir);
-    }
-    return omTransactionInfo;
+    return HAUtils
+        .getTrxnInfoFromCheckpoint(conf, dbPath, new OMDBDefinition());
   }
 
   /**
@@ -281,31 +240,16 @@
    *
    * If transaction info transaction Index is less than or equal to
    * lastAppliedIndex, return false, else return true.
-   * @param omTransactionInfo
+   * @param transactionInfo
    * @param lastAppliedIndex
    * @param leaderId
    * @param newDBlocation
    * @return boolean
    */
-  public static boolean verifyTransactionInfo(
-      OMTransactionInfo omTransactionInfo,
-      long lastAppliedIndex,
-      String leaderId, Path newDBlocation) {
-    if (omTransactionInfo.getTransactionIndex() <= lastAppliedIndex) {
-      OzoneManager.LOG.error("Failed to install checkpoint from OM leader: {}" +
-              ". The last applied index: {} is greater than or equal to the " +
-              "checkpoint's applied index: {}. Deleting the downloaded " +
-              "checkpoint {}", leaderId, lastAppliedIndex,
-          omTransactionInfo.getTransactionIndex(), newDBlocation);
-      try {
-        FileUtils.deleteFully(newDBlocation);
-      } catch (IOException e) {
-        OzoneManager.LOG.error("Failed to fully delete the downloaded DB " +
-            "checkpoint {} from OM leader {}.", newDBlocation, leaderId, e);
-      }
-      return false;
-    }
-
-    return true;
+  public static boolean verifyTransactionInfo(TransactionInfo transactionInfo,
+      long lastAppliedIndex, String leaderId, Path newDBlocation) {
+    return HAUtils
+        .verifyTransactionInfo(transactionInfo, lastAppliedIndex, leaderId,
+            newDBlocation, OzoneManager.LOG);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
index a11c60b..097d142 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java
@@ -79,7 +79,7 @@
 
     this.peerNodesMap = new HashMap<>();
     for (OMNodeDetails peerNode : peerNodes) {
-      this.peerNodesMap.put(peerNode.getOMNodeId(), peerNode);
+      this.peerNodesMap.put(peerNode.getNodeId(), peerNode);
     }
 
     this.httpPolicy = HttpConfig.getHttpPolicy(conf);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
index 49d9f47..32b2ac9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.AddSCMRequest;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
@@ -183,6 +184,11 @@
   }
 
   @Override
+  public boolean addSCM(AddSCMRequest request) throws IOException {
+    return false;
+  }
+
+  @Override
   public List<DatanodeDetails> sortDatanodes(List<String> nodes,
       String clientMachine) throws IOException {
     return null;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 5cc048b..2bce838 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -68,22 +68,22 @@
   @Test
   public void testTransactionTable() throws Exception {
     omMetadataManager.getTransactionInfoTable().put(TRANSACTION_INFO_KEY,
-        new OMTransactionInfo.Builder().setCurrentTerm(1)
+        new TransactionInfo.Builder().setCurrentTerm(1)
             .setTransactionIndex(100).build());
 
     omMetadataManager.getTransactionInfoTable().put(TRANSACTION_INFO_KEY,
-        new OMTransactionInfo.Builder().setCurrentTerm(2)
+        new TransactionInfo.Builder().setCurrentTerm(2)
             .setTransactionIndex(200).build());
 
     omMetadataManager.getTransactionInfoTable().put(TRANSACTION_INFO_KEY,
-        new OMTransactionInfo.Builder().setCurrentTerm(3)
+        new TransactionInfo.Builder().setCurrentTerm(3)
             .setTransactionIndex(250).build());
 
-    OMTransactionInfo omTransactionInfo =
+    TransactionInfo transactionInfo =
         omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
 
-    Assert.assertEquals(3, omTransactionInfo.getTerm());
-    Assert.assertEquals(250, omTransactionInfo.getTransactionIndex());
+    Assert.assertEquals(3, transactionInfo.getTerm());
+    Assert.assertEquals(250, transactionInfo.getTransactionIndex());
 
 
   }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
index a34a412..c9d590b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java
@@ -22,6 +22,7 @@
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.junit.After;
 import org.junit.Assert;
@@ -133,13 +134,13 @@
     assertEquals(bucketCount, lastAppliedIndex);
 
 
-    OMTransactionInfo omTransactionInfo =
+    TransactionInfo transactionInfo =
         omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
-    assertNotNull(omTransactionInfo);
+    assertNotNull(transactionInfo);
 
     Assert.assertEquals(lastAppliedIndex,
-        omTransactionInfo.getTransactionIndex());
-    Assert.assertEquals(term, omTransactionInfo.getTerm());
+        transactionInfo.getTransactionIndex());
+    Assert.assertEquals(term, transactionInfo.getTerm());
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index 260e2cd..ce19ae0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -24,6 +24,7 @@
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditLogger;
@@ -196,13 +197,13 @@
     Assert.assertEquals(bucketCount + deleteCount + 1, lastAppliedIndex);
 
 
-    OMTransactionInfo omTransactionInfo =
+    TransactionInfo transactionInfo =
         omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
-    assertNotNull(omTransactionInfo);
+    assertNotNull(transactionInfo);
 
     Assert.assertEquals(lastAppliedIndex,
-        omTransactionInfo.getTransactionIndex());
-    Assert.assertEquals(term, omTransactionInfo.getTerm());
+        transactionInfo.getTransactionIndex());
+    Assert.assertEquals(term, transactionInfo.getTerm());
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
index 66c26a9..d4eda6e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
@@ -28,10 +28,12 @@
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.ha.ratis.RatisSnapshotInfo;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.ha.OMNodeDetails;
@@ -108,7 +110,7 @@
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
     when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
     initialTermIndex = TermIndex.valueOf(0, 0);
-    OMRatisSnapshotInfo omRatisSnapshotInfo = new OMRatisSnapshotInfo();
+    RatisSnapshotInfo omRatisSnapshotInfo = new RatisSnapshotInfo();
     when(ozoneManager.getSnapshotInfo()).thenReturn(omRatisSnapshotInfo);
     secConfig = new SecurityConfig(conf);
     certClient = new OMCertificateClient(secConfig);
@@ -146,7 +148,7 @@
         snapshotInfo.getTerm(), snapshotInfo.getIndex() + 100);
 
     omMetadataManager.getTransactionInfoTable().put(TRANSACTION_INFO_KEY,
-        new OMTransactionInfo.Builder()
+        new TransactionInfo.Builder()
             .setCurrentTerm(snapshotInfo.getTerm())
             .setTransactionIndex(snapshotInfo.getIndex() + 100)
             .build());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
index 285c992..3368cff 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.om.ratis;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.common.ha.ratis.RatisSnapshotInfo;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
@@ -58,7 +59,7 @@
 
     when(ozoneManagerRatisServer.getOzoneManager()).thenReturn(ozoneManager);
     when(ozoneManager.getSnapshotInfo()).thenReturn(
-        Mockito.mock(OMRatisSnapshotInfo.class));
+        Mockito.mock(RatisSnapshotInfo.class));
     ozoneManagerStateMachine =
         new OzoneManagerStateMachine(ozoneManagerRatisServer, false);
     ozoneManagerStateMachine.notifyTermIndexUpdated(0, 0);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
index b3f4c5f..8ddc8ac 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java
@@ -172,11 +172,7 @@
   StorageContainerLocationProtocol getSCMProtocol(
       final OzoneConfiguration configuration) {
     StorageContainerLocationProtocol storageContainerLocationProtocol = null;
-    try {
-      storageContainerLocationProtocol = newContainerRpcClient(configuration);
-    } catch (IOException e) {
-      LOG.error("Error in provisioning StorageContainerLocationProtocol ", e);
-    }
+    storageContainerLocationProtocol = newContainerRpcClient(configuration);
     return storageContainerLocationProtocol;
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index c2b1c5f..596935f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -20,6 +20,7 @@
 
 import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfig.ConfigStrings.OZONE_RECON_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.utils.HAUtils.checkSecurityAndSCMHAEnabled;
 
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.StringUtils;
@@ -75,6 +76,7 @@
         ReconServer.class, originalArgs, LOG);
 
     configuration = createOzoneConfiguration();
+    checkSecurityAndSCMHAEnabled(configuration);
     ConfigurationProvider.setConfiguration(configuration);
 
     injector =  Guice.createInjector(new
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
index 3b94b0b..2123457 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
@@ -78,7 +78,7 @@
   @GET
   public Response getClusterState() {
     List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
-    int containers = this.containerManager.getContainerIDs().size();
+    int containers = this.containerManager.getContainers().size();
     int pipelines = this.pipelineManager.getPipelines().size();
     int healthyDatanodes =
         nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index 5cd6ec8..0e26afe 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -246,7 +246,7 @@
           long containerID = container.getContainerId();
           try {
             ContainerInfo containerInfo =
-                containerManager.getContainer(new ContainerID(containerID));
+                containerManager.getContainer(ContainerID.valueOf(containerID));
             long keyCount = containerInfo.getNumberOfKeys();
             UUID pipelineID = containerInfo.getPipelineID().getId();
 
@@ -307,7 +307,7 @@
       for (UnhealthyContainers c : containers) {
         long containerID = c.getContainerId();
         ContainerInfo containerInfo =
-            containerManager.getContainer(new ContainerID(containerID));
+            containerManager.getContainer(ContainerID.valueOf(containerID));
         long keyCount = containerInfo.getNumberOfKeys();
         UUID pipelineID = containerInfo.getPipelineID().getId();
         List<ContainerHistory> datanodes =
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 80764aa..04afed2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
@@ -49,14 +49,14 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(ContainerHealthTask.class);
 
-  private ContainerManager containerManager;
+  private ContainerManagerV2 containerManager;
   private ContainerHealthSchemaManager containerHealthSchemaManager;
   private PlacementPolicy placementPolicy;
   private final long interval;
   private Set<ContainerInfo> processedContainers = new HashSet<>();
 
   public ContainerHealthTask(
-      ContainerManager containerManager,
+      ContainerManagerV2 containerManager,
       ReconTaskStatusDao reconTaskStatusDao,
       ContainerHealthSchemaManager containerHealthSchemaManager,
       PlacementPolicy placementPolicy,
@@ -98,7 +98,7 @@
   private ContainerHealthStatus setCurrentContainer(long recordId)
       throws ContainerNotFoundException {
     ContainerInfo container =
-        containerManager.getContainer(new ContainerID(recordId));
+        containerManager.getContainer(ContainerID.valueOf(recordId));
     Set<ContainerReplica> replicas =
         containerManager.getContainerReplicas(container.containerID());
     return new ContainerHealthStatus(container, replicas, placementPolicy);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index f80d6ad..c6d35ee 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -30,22 +30,25 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.conf.Configuration;
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.recon.persistence.ContainerHistory;
 import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
 import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
@@ -57,11 +60,12 @@
 /**
  * Recon's overriding implementation of SCM's Container Manager.
  */
-public class ReconContainerManager extends SCMContainerManager {
+public class ReconContainerManager extends ContainerManagerImpl {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(ReconContainerManager.class);
-  private final StorageContainerServiceProvider scmClient;
+  private StorageContainerServiceProvider scmClient;
+  private PipelineManager pipelineManager;
   private final ContainerHealthSchemaManager containerHealthSchemaManager;
   private final ContainerDBServiceProvider cdbServiceProvider;
   private final Table<UUID, DatanodeDetails> nodeDB;
@@ -78,21 +82,25 @@
    *
    * @throws IOException on Failure.
    */
+  @SuppressWarnings("parameternumber")
   public ReconContainerManager(
-      ConfigurationSource conf,
+      Configuration conf,
+      DBStore store,
       Table<ContainerID, ContainerInfo> containerStore,
-      DBStore batchHandler,
       PipelineManager pipelineManager,
       StorageContainerServiceProvider scm,
       ContainerHealthSchemaManager containerHealthSchemaManager,
-      ContainerDBServiceProvider containerDBServiceProvider)
+      ContainerDBServiceProvider containerDBServiceProvider,
+      SCMHAManager scmhaManager,
+      SequenceIdGenerator sequenceIdGen)
       throws IOException {
-    super(conf, containerStore, batchHandler, pipelineManager);
+    super(conf, scmhaManager, sequenceIdGen, pipelineManager, containerStore);
     this.scmClient = scm;
+    this.pipelineManager = pipelineManager;
     this.containerHealthSchemaManager = containerHealthSchemaManager;
     this.cdbServiceProvider = containerDBServiceProvider;
     // batchHandler = scmDBStore
-    this.nodeDB = ReconSCMDBDefinition.NODES.getTable(batchHandler);
+    this.nodeDB = ReconSCMDBDefinition.NODES.getTable(store);
     this.replicaHistoryMap = new ConcurrentHashMap<>();
   }
 
@@ -107,7 +115,7 @@
       ContainerReplicaProto.State replicaState,
       DatanodeDetails datanodeDetails)
       throws IOException {
-    if (!exists(containerID)) {
+    if (!containerExist(containerID)) {
       LOG.info("New container {} got from {}.", containerID,
           datanodeDetails.getHostName());
       ContainerWithPipeline containerWithPipeline =
@@ -115,7 +123,7 @@
       LOG.debug("Verified new container from SCM {}, {} ",
           containerID, containerWithPipeline.getPipeline().getId());
       // If no other client added this, go ahead and add this container.
-      if (!exists(containerID)) {
+      if (!containerExist(containerID)) {
         addNewContainer(containerID.getId(), containerWithPipeline);
       }
     } else {
@@ -129,7 +137,11 @@
           && isHealthy(replicaState)) {
         LOG.info("Container {} has state OPEN, but Replica has State {}.",
             containerID, replicaState);
-        updateContainerState(containerID, FINALIZE);
+        try {
+          updateContainerState(containerID, FINALIZE);
+        } catch (InvalidStateTransitionException e) {
+          throw new IOException(e);
+        }
       }
     }
   }
@@ -150,16 +162,16 @@
                               ContainerWithPipeline containerWithPipeline)
       throws IOException {
     ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
-    getLock().lock();
     try {
-      boolean success = false;
       if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)) {
         PipelineID pipelineID = containerWithPipeline.getPipeline().getId();
-        if (getPipelineManager().containsPipeline(pipelineID)) {
-          getContainerStateManager().addContainerInfo(containerId,
-              containerInfo, getPipelineManager(),
-              containerWithPipeline.getPipeline());
-          success = true;
+        if (pipelineManager.containsPipeline(pipelineID)) {
+          getContainerStateManager().addContainer(containerInfo.getProtobuf());
+          pipelineManager.addContainerToPipeline(
+              containerWithPipeline.getPipeline().getId(),
+              containerInfo.containerID());
+          LOG.info("Successfully added container {} to Recon.",
+              containerInfo.containerID());
         } else {
           // Get open container for a pipeline that Recon does not know
           // about yet. Cannot update internal state until pipeline is synced.
@@ -170,24 +182,17 @@
       } else {
         // Non 'Open' Container. No need to worry about pipeline since SCM
         // returns a random pipelineID.
-        getContainerStateManager().addContainerInfo(containerId,
-            containerInfo, getPipelineManager(), null);
-        success = true;
-      }
-      if (success) {
-        addContainerToDB(containerInfo);
+        getContainerStateManager().addContainer(containerInfo.getProtobuf());
         LOG.info("Successfully added container {} to Recon.",
             containerInfo.containerID());
       }
     } catch (IOException ex) {
       LOG.info("Exception while adding container {} .",
           containerInfo.containerID(), ex);
-      getPipelineManager().removeContainerFromPipeline(
+      pipelineManager.removeContainerFromPipeline(
           containerInfo.getPipelineID(),
-          new ContainerID(containerInfo.getContainerID()));
+          ContainerID.valueOf(containerInfo.getContainerID()));
       throw ex;
-    } finally {
-      getLock().unlock();
     }
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
index 228a657..cd65ca7 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
@@ -41,7 +41,7 @@
       LoggerFactory.getLogger(ReconContainerReportHandler.class);
 
   public ReconContainerReportHandler(NodeManager nodeManager,
-                                     ContainerManager containerManager) {
+                                     ContainerManagerV2 containerManager) {
     super(nodeManager, containerManager);
   }
 
@@ -56,7 +56,7 @@
 
     List<ContainerReplicaProto> reportsList = containerReport.getReportsList();
     for (ContainerReplicaProto containerReplicaProto : reportsList) {
-      final ContainerID id = ContainerID.valueof(
+      final ContainerID id = ContainerID.valueOf(
           containerReplicaProto.getContainerID());
       try {
         containerManager.checkAndAddNewContainer(id,
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java
index f1a8f01..2f14806 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDatanodeProtocolServer.java
@@ -23,10 +23,11 @@
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.ozone.protocol.ReconDatanodeProtocol;
 import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB;
@@ -62,7 +63,8 @@
   }
 
   @Override
-  public InetSocketAddress getDataNodeBindAddress(OzoneConfiguration conf) {
+  public InetSocketAddress getDataNodeBindAddress(
+      OzoneConfiguration conf, SCMNodeDetails scmNodeDetails) {
     return HddsServerUtil.getReconDataNodeBindAddress(conf);
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index 0262c8b..0cc4926 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -23,13 +23,15 @@
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -43,8 +45,8 @@
       ReconIncrementalContainerReportHandler.class);
 
   public ReconIncrementalContainerReportHandler(NodeManager nodeManager,
-      ContainerManager containerManager) {
-    super(nodeManager, containerManager);
+      ContainerManagerV2 containerManager, SCMContext scmContext) {
+    super(nodeManager, containerManager, scmContext);
   }
 
   @Override
@@ -70,7 +72,7 @@
     for (ContainerReplicaProto replicaProto :
         report.getReport().getReportList()) {
       try {
-        final ContainerID id = ContainerID.valueof(
+        final ContainerID id = ContainerID.valueOf(
             replicaProto.getContainerID());
         try {
           containerManager.checkAndAddNewContainer(id, replicaProto.getState(),
@@ -88,7 +90,7 @@
         success = false;
         LOG.error("Received ICR from unknown datanode {}.",
             report.getDatanodeDetails(), ex);
-      } catch (IOException e) {
+      } catch (IOException | InvalidStateTransitionException e) {
         success = false;
         LOG.error("Exception while processing ICR for container {}",
             replicaProto.getContainerID());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index 218f717..3545efb 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -69,7 +70,8 @@
                           EventPublisher eventPublisher,
                           NetworkTopology networkTopology,
                           Table<UUID, DatanodeDetails> nodeDB) {
-    super(conf, scmStorageConfig, eventPublisher, networkTopology);
+    super(conf, scmStorageConfig, eventPublisher, networkTopology,
+        SCMContext.emptyContext());
     this.nodeDB = nodeDB;
     loadExistingNodes();
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
index a8dd3c9..72e0a76 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java
@@ -23,52 +23,69 @@
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineFactory;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManagerV2Impl;
+import org.apache.hadoop.hdds.scm.pipeline.StateManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.ClientVersions;
 
 import com.google.common.annotations.VisibleForTesting;
-import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
+import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED;
 
 /**
  * Recon's overriding implementation of SCM's Pipeline Manager.
  */
-public class ReconPipelineManager extends SCMPipelineManager {
+public final class ReconPipelineManager extends PipelineManagerV2Impl {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(ReconPipelineManager.class);
 
-  public ReconPipelineManager(ConfigurationSource conf,
+  private ReconPipelineManager(ConfigurationSource conf,
+                               SCMHAManager scmhaManager,
+                               NodeManager nodeManager,
+                               StateManager pipelineStateManager,
+                               PipelineFactory pipelineFactory,
+                               EventPublisher eventPublisher,
+                               SCMContext scmContext) {
+    super(conf, scmhaManager, nodeManager, pipelineStateManager,
+        pipelineFactory, eventPublisher, scmContext);
+  }
+
+  public static ReconPipelineManager newReconPipelineManager(
+      ConfigurationSource conf,
       NodeManager nodeManager,
       Table<PipelineID, Pipeline> pipelineStore,
-      EventPublisher eventPublisher)
-      throws IOException {
-    super(conf, nodeManager, pipelineStore, eventPublisher,
-        new PipelineStateManager(),
-        new ReconPipelineFactory());
-    initializePipelineState();
-  }
-  
-  @Override
-  public void triggerPipelineCreation() {
-    // Don't do anything in Recon.
-  }
+      EventPublisher eventPublisher,
+      SCMHAManager scmhaManager,
+      SCMContext scmContext) throws IOException {
 
-  @Override
-  protected void destroyPipeline(Pipeline pipeline) throws IOException {
-    // remove the pipeline from the pipeline manager
-    removePipeline(pipeline.getId());
-  }
+    // Create PipelineStateManager
+    StateManager stateManager = PipelineStateManagerV2Impl
+        .newBuilder()
+        .setPipelineStore(pipelineStore)
+        .setNodeManager(nodeManager)
+        .setRatisServer(scmhaManager.getRatisServer())
+        .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
+        .build();
 
+    // Create PipelineFactory
+    PipelineFactory pipelineFactory = new ReconPipelineFactory();
+
+    return new ReconPipelineManager(conf, scmhaManager, nodeManager,
+        stateManager, pipelineFactory, eventPublisher, scmContext);
+  }
 
   /**
    * Bootstrap Recon's pipeline metadata with that from SCM.
@@ -77,7 +94,7 @@
    */
   void initializePipelines(List<Pipeline> pipelinesFromScm) throws IOException {
 
-    getLock().writeLock().lock();
+    getLock().lock();
     try {
       List<Pipeline> pipelinesInHouse = getPipelines();
       LOG.info("Recon has {} pipelines in house.", pipelinesInHouse.size());
@@ -90,20 +107,21 @@
         } else {
           // Recon already has this pipeline. Just update state and creation
           // time.
-          getStateManager().updatePipelineState(pipeline.getId(),
-              pipeline.getPipelineState());
+          getStateManager().updatePipelineState(
+              pipeline.getId().getProtobuf(),
+              Pipeline.PipelineState.getProtobuf(pipeline.getPipelineState()));
           getPipeline(pipeline.getId()).setCreationTimestamp(
               pipeline.getCreationTimestamp());
         }
         removeInvalidPipelines(pipelinesFromScm);
       }
     } finally {
-      getLock().writeLock().unlock();
+      getLock().unlock();
     }
   }
 
   public void removeInvalidPipelines(List<Pipeline> pipelinesFromScm) {
-    getLock().writeLock().lock();
+    getLock().lock();
     try {
       List<Pipeline> pipelinesInHouse = getPipelines();
       // Removing pipelines in Recon that are no longer in SCM.
@@ -117,21 +135,23 @@
         PipelineID pipelineID = p.getId();
         if (!p.getPipelineState().equals(CLOSED)) {
           try {
-            getStateManager().updatePipelineState(pipelineID, CLOSED);
-          } catch (PipelineNotFoundException e) {
+            getStateManager().updatePipelineState(
+                pipelineID.getProtobuf(),
+                HddsProtos.PipelineState.PIPELINE_CLOSED);
+          } catch (IOException e) {
             LOG.warn("Pipeline {} not found while updating state. ",
                 p.getId(), e);
           }
         }
         try {
           LOG.info("Removing invalid pipeline {} from Recon.", pipelineID);
-          finalizeAndDestroyPipeline(p, false);
+          closePipeline(p, false);
         } catch (IOException e) {
           LOG.warn("Unable to remove pipeline {}", pipelineID, e);
         }
       });
     } finally {
-      getLock().writeLock().unlock();
+      getLock().unlock();
     }
   }
   /**
@@ -140,14 +160,13 @@
    * @throws IOException
    */
   @VisibleForTesting
-  void addPipeline(Pipeline pipeline) throws IOException {
-    getLock().writeLock().lock();
+  public void addPipeline(Pipeline pipeline) throws IOException {
+    getLock().lock();
     try {
-      getPipelineStore().put(pipeline.getId(), pipeline);
-      getStateManager().addPipeline(pipeline);
-      getNodeManager().addPipeline(pipeline);
+      getStateManager().addPipeline(
+          pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION));
     } finally {
-      getLock().writeLock().unlock();
+      getLock().unlock();
     }
   }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java
index 246d9ba..589de00 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -46,9 +47,10 @@
 
   public ReconPipelineReportHandler(SafeModeManager scmSafeModeManager,
       PipelineManager pipelineManager,
+      SCMContext scmContext,
       ConfigurationSource conf,
       StorageContainerServiceProvider scmServiceProvider) {
-    super(scmSafeModeManager, pipelineManager, conf);
+    super(scmSafeModeManager, pipelineManager, scmContext, conf);
     this.scmServiceProvider = scmServiceProvider;
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index d45c45d..9ebd3d0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -30,13 +30,20 @@
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.ReplicationManager;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
+import org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.DeadNodeHandler;
@@ -71,14 +78,20 @@
 public class ReconStorageContainerManagerFacade
     implements OzoneStorageContainerManager {
 
+  // TODO: Fix Recon.
+
   private static final Logger LOG = LoggerFactory
       .getLogger(ReconStorageContainerManagerFacade.class);
 
   private final OzoneConfiguration ozoneConfiguration;
   private final ReconDatanodeProtocolServer datanodeProtocolServer;
   private final EventQueue eventQueue;
+  private final SCMContext scmContext;
   private final SCMStorageConfig scmStorageConfig;
   private final DBStore dbStore;
+  private final SCMHANodeDetails scmHANodeDetails;
+  private final SCMHAManager scmhaManager;
+  private final SequenceIdGenerator sequenceIdGen;
 
   private ReconNodeManager nodeManager;
   private ReconPipelineManager pipelineManager;
@@ -96,14 +109,19 @@
       ContainerHealthSchemaManager containerHealthSchemaManager,
       ContainerDBServiceProvider containerDBServiceProvider)
       throws IOException {
+    scmHANodeDetails = SCMHANodeDetails.loadSCMHAConfig(conf);
     this.eventQueue = new EventQueue();
     eventQueue.setSilent(true);
+    this.scmContext = SCMContext.emptyContext();
     this.ozoneConfiguration = getReconScmConfiguration(conf);
     this.scmStorageConfig = new ReconStorageConfig(conf);
     this.clusterMap = new NetworkTopologyImpl(conf);
-    dbStore = DBStoreBuilder
+    this.dbStore = DBStoreBuilder
         .createDBStore(ozoneConfiguration, new ReconSCMDBDefinition());
-
+    this.scmhaManager = MockSCMHAManager.getInstance(
+        true, new SCMDBTransactionBufferImpl());
+    this.sequenceIdGen = new SequenceIdGenerator(
+        conf, scmhaManager, ReconSCMDBDefinition.SEQUENCE_ID.getTable(dbStore));
     this.nodeManager =
         new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
             ReconSCMDBDefinition.NODES.getTable(dbStore));
@@ -113,15 +131,19 @@
         clusterMap, true, placementMetrics);
     this.datanodeProtocolServer = new ReconDatanodeProtocolServer(
         conf, this, eventQueue);
-    this.pipelineManager =
-        new ReconPipelineManager(conf,
-            nodeManager,
-            ReconSCMDBDefinition.PIPELINES.getTable(dbStore),
-            eventQueue);
+    this.pipelineManager = ReconPipelineManager.newReconPipelineManager(
+        conf,
+        nodeManager,
+        ReconSCMDBDefinition.PIPELINES.getTable(dbStore),
+        eventQueue,
+        scmhaManager,
+        scmContext);
     this.containerManager = new ReconContainerManager(conf,
+        dbStore,
         ReconSCMDBDefinition.CONTAINERS.getTable(dbStore),
-        dbStore, pipelineManager, scmServiceProvider,
-        containerHealthSchemaManager, containerDBServiceProvider);
+        pipelineManager, scmServiceProvider,
+        containerHealthSchemaManager, containerDBServiceProvider,
+        scmhaManager, sequenceIdGen);
     this.scmServiceProvider = scmServiceProvider;
 
     NodeReportHandler nodeReportHandler =
@@ -129,11 +151,11 @@
 
     SafeModeManager safeModeManager = new ReconSafeModeManager();
     ReconPipelineReportHandler pipelineReportHandler =
-        new ReconPipelineReportHandler(
-            safeModeManager, pipelineManager, conf, scmServiceProvider);
+        new ReconPipelineReportHandler(safeModeManager,
+            pipelineManager, scmContext, conf, scmServiceProvider);
 
     PipelineActionHandler pipelineActionHandler =
-        new PipelineActionHandler(pipelineManager, conf);
+        new PipelineActionHandler(pipelineManager, scmContext, conf);
 
     StaleNodeHandler staleNodeHandler =
         new StaleNodeHandler(nodeManager, pipelineManager, conf);
@@ -145,9 +167,10 @@
 
     IncrementalContainerReportHandler icrHandler =
         new ReconIncrementalContainerReportHandler(nodeManager,
-            containerManager);
+            containerManager, scmContext);
     CloseContainerEventHandler closeContainerHandler =
-        new CloseContainerEventHandler(pipelineManager, containerManager);
+        new CloseContainerEventHandler(
+            pipelineManager, containerManager, scmContext);
     ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
     ReconNewNodeHandler newNodeHandler = new ReconNewNodeHandler(nodeManager);
 
@@ -281,7 +304,7 @@
   }
 
   @Override
-  public ContainerManager getContainerManager() {
+  public ContainerManagerV2 getContainerManager() {
     return containerManager;
   }
 
@@ -295,6 +318,11 @@
     return getDatanodeProtocolServer().getDatanodeRpcAddress();
   }
 
+  @Override
+  public SCMNodeDetails getScmNodeDetails() {
+    return scmHANodeDetails.getLocalNodeDetails();
+  }
+
   public EventQueue getEventQueue() {
     return eventQueue;
   }
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
index 0385fd0..516ac88 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
@@ -42,7 +42,7 @@
   json-server: 0.15.1
   npm-run-all: 4.1.5
   xo: 0.30.0
-lockfileVersion: 5.1
+lockfileVersion: 5.2
 packages:
   /3d-view/2.0.0:
     dependencies:
@@ -2033,7 +2033,7 @@
       jest-haste-map: 24.9.0
       jest-message-util: 24.9.0
       jest-regex-util: 24.9.0
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       jest-resolve-dependencies: 24.9.0
       jest-runner: 24.9.0
       jest-runtime: 24.9.0
@@ -2088,7 +2088,7 @@
       istanbul-lib-source-maps: 3.0.6
       istanbul-reports: 2.2.7
       jest-haste-map: 24.9.0
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       jest-runtime: 24.9.0
       jest-util: 24.9.0
       jest-worker: 24.9.0
@@ -2196,7 +2196,7 @@
       integrity: sha1-zlblOfg1UrWNENZy6k1vya3HsjQ=
   /@mapbox/mapbox-gl-supported/1.5.0_mapbox-gl@1.10.1:
     dependencies:
-      mapbox-gl: 1.10.1_mapbox-gl@1.10.1
+      mapbox-gl: 1.10.1
     dev: false
     peerDependencies:
       mapbox-gl: '>=0.32.1 <2.0.0'
@@ -3470,7 +3470,7 @@
       mkdirp: 0.5.5
       pify: 4.0.1
       schema-utils: 2.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>= 6.9'
@@ -5016,7 +5016,7 @@
       postcss-modules-values: 3.0.0
       postcss-value-parser: 4.1.0
       schema-utils: 2.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>= 8.9.0'
@@ -6176,7 +6176,7 @@
       loader-utils: 1.4.0
       object-hash: 2.0.3
       schema-utils: 2.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>= 8.9.0'
@@ -6912,7 +6912,7 @@
     dependencies:
       loader-utils: 1.4.0
       schema-utils: 2.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>= 8.9.0'
@@ -8216,7 +8216,7 @@
       pretty-error: 2.1.1
       tapable: 1.1.3
       util.promisify: 1.0.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>=6.9'
@@ -9214,7 +9214,7 @@
       jest-get-type: 24.9.0
       jest-jasmine2: 24.9.0
       jest-regex-util: 24.9.0
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       jest-util: 24.9.0
       jest-validate: 24.9.0
       micromatch: 3.1.10
@@ -9403,7 +9403,7 @@
       integrity: sha512-3BEYN5WbSq9wd+SyLDES7AHnjH9A/ROBwmz7l2y+ol+NtSFO8DYiEBzoO1CeFc9a8DYy10EO4dDFVv/wN3zl1w==
   /jest-pnp-resolver/1.2.1_jest-resolve@24.9.0:
     dependencies:
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
     dev: false
     engines:
       node: '>=6'
@@ -9430,7 +9430,7 @@
       node: '>= 6'
     resolution:
       integrity: sha512-Fm7b6AlWnYhT0BXy4hXpactHIqER7erNgIsIozDXWl5dVm+k8XdGVe1oTg1JyaFnOxarMEbax3wyRJqGP2Pq+g==
-  /jest-resolve/24.9.0_jest-resolve@24.9.0:
+  /jest-resolve/24.9.0:
     dependencies:
       '@jest/types': 24.9.0
       browser-resolve: 1.11.3
@@ -9440,8 +9440,6 @@
     dev: false
     engines:
       node: '>= 6'
-    peerDependencies:
-      jest-resolve: '*'
     resolution:
       integrity: sha512-TaLeLVL1l08YFZAt3zaPtjiVvyy4oSA6CRe+0AFPPVX3Q/VI0giIWWoAvoS5L96vj9Dqxj4fB5p2qrHCmTU/MQ==
   /jest-runner/24.9.0:
@@ -9459,7 +9457,7 @@
       jest-jasmine2: 24.9.0
       jest-leak-detector: 24.9.0
       jest-message-util: 24.9.0
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       jest-runtime: 24.9.0
       jest-util: 24.9.0
       jest-worker: 24.9.0
@@ -9487,7 +9485,7 @@
       jest-message-util: 24.9.0
       jest-mock: 24.9.0
       jest-regex-util: 24.9.0
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       jest-snapshot: 24.9.0
       jest-util: 24.9.0
       jest-validate: 24.9.0
@@ -9517,7 +9515,7 @@
       jest-get-type: 24.9.0
       jest-matcher-utils: 24.9.0
       jest-message-util: 24.9.0
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       mkdirp: 0.5.5
       natural-compare: 1.4.0
       pretty-format: 24.9.0
@@ -10289,7 +10287,7 @@
       node: '>=0.10.0'
     resolution:
       integrity: sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=
-  /mapbox-gl/1.10.1_mapbox-gl@1.10.1:
+  /mapbox-gl/1.10.1:
     dependencies:
       '@mapbox/geojson-rewind': 0.5.0
       '@mapbox/geojson-types': 1.0.2
@@ -10317,8 +10315,6 @@
     dev: false
     engines:
       node: '>=6.4.0'
-    peerDependencies:
-      mapbox-gl: '*'
     resolution:
       integrity: sha512-0aHt+lFUpYfvh0kMIqXqNXqoYMuhuAsMlw87TbhWrw78Tx2zfuPI0Lx31/YPUgJ+Ire0tzQ4JnuBL7acDNXmMg==
   /marching-simplex-table/1.0.0:
@@ -10575,7 +10571,7 @@
       loader-utils: 1.4.0
       normalize-url: 1.9.1
       schema-utils: 1.0.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       webpack-sources: 1.4.3
     dev: false
     engines:
@@ -11304,7 +11300,7 @@
     dependencies:
       cssnano: 4.1.10
       last-call-webpack-plugin: 3.0.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     peerDependencies:
       webpack: ^4.0.0
@@ -11870,7 +11866,7 @@
       has-hover: 1.0.1
       has-passive-events: 1.0.0
       is-mobile: 2.2.1
-      mapbox-gl: 1.10.1_mapbox-gl@1.10.1
+      mapbox-gl: 1.10.1
       matrix-camera-controller: 2.1.3
       mouse-change: 1.4.0
       mouse-event-offset: 3.0.2
@@ -13658,7 +13654,7 @@
       identity-obj-proxy: 3.0.0
       jest: 24.9.0
       jest-environment-jsdom-fourteen: 1.0.1
-      jest-resolve: 24.9.0_jest-resolve@24.9.0
+      jest-resolve: 24.9.0
       jest-watch-typeahead: 0.4.2
       mini-css-extract-plugin: 0.9.0_webpack@4.42.0
       optimize-css-assets-webpack-plugin: 5.0.3_webpack@4.42.0
@@ -13679,7 +13675,7 @@
       ts-pnp: 1.1.6_typescript@3.4.5
       typescript: 3.4.5
       url-loader: 2.3.0_file-loader@4.3.0+webpack@4.42.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       webpack-dev-server: 3.10.3_webpack@4.42.0
       webpack-manifest-plugin: 2.2.0_webpack@4.42.0
       workbox-webpack-plugin: 4.3.1_webpack@4.42.0
@@ -14512,7 +14508,7 @@
       neo-async: 2.6.1
       schema-utils: 2.7.0
       semver: 6.3.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>= 8.9.0'
@@ -15583,7 +15579,7 @@
       serialize-javascript: 3.1.0
       source-map: 0.6.1
       terser: 4.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       webpack-sources: 1.4.3
       worker-farm: 1.7.0
     dev: false
@@ -15603,7 +15599,7 @@
       serialize-javascript: 2.1.2
       source-map: 0.6.1
       terser: 4.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       webpack-sources: 1.4.3
     dev: false
     engines:
@@ -16188,7 +16184,7 @@
       loader-utils: 1.4.0
       mime: 2.4.6
       schema-utils: 2.7.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>= 8.9.0'
@@ -16491,7 +16487,7 @@
       mime: 2.4.6
       mkdirp: 0.5.5
       range-parser: 1.2.1
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       webpack-log: 2.0.0
     dev: false
     engines:
@@ -16531,7 +16527,7 @@
       strip-ansi: 3.0.1
       supports-color: 6.1.0
       url: 0.11.0
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       webpack-dev-middleware: 3.7.2_webpack@4.42.0
       webpack-log: 2.0.0
       ws: 6.2.1
@@ -16563,7 +16559,7 @@
       lodash: 4.17.15
       object.entries: 1.1.2
       tapable: 1.1.3
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
     dev: false
     engines:
       node: '>=6.11.5'
@@ -16578,7 +16574,7 @@
     dev: false
     resolution:
       integrity: sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==
-  /webpack/4.42.0_webpack@4.42.0:
+  /webpack/4.42.0:
     dependencies:
       '@webassemblyjs/ast': 1.8.5
       '@webassemblyjs/helper-module-context': 1.8.5
@@ -16607,8 +16603,6 @@
     engines:
       node: '>=6.11.5'
     hasBin: true
-    peerDependencies:
-      webpack: '*'
     resolution:
       integrity: sha512-EzJRHvwQyBiYrYqhyjW9AqM90dE4+s1/XtCfn7uWg6cS72zH+2VPFAlsnW0+W0cDi0XRjNKUMoJtpSi50+Ph6w==
   /websocket-driver/0.7.4:
@@ -16819,7 +16813,7 @@
     dependencies:
       '@babel/runtime': 7.10.2
       json-stable-stringify: 1.0.1
-      webpack: 4.42.0_webpack@4.42.0
+      webpack: 4.42.0
       workbox-build: 4.3.1
     dev: false
     engines:
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 49aa306..e569cde 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
@@ -71,6 +72,7 @@
 import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
 import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
 import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
@@ -97,12 +99,14 @@
 
   private OzoneStorageContainerManager ozoneStorageContainerManager;
   private ReconContainerManager reconContainerManager;
+  private ReconPipelineManager reconPipelineManager;
   private ContainerDBServiceProvider containerDbServiceProvider;
   private ContainerEndpoint containerEndpoint;
   private boolean isSetupDone = false;
   private ContainerHealthSchemaManager containerHealthSchemaManager;
   private ReconOMMetadataManager reconOMMetadataManager;
-  private ContainerID containerID = new ContainerID(1L);
+  private ContainerID containerID = ContainerID.valueOf(1L);
+  private Pipeline pipeline;
   private PipelineID pipelineID;
   private long keyCount = 5L;
 
@@ -116,9 +120,6 @@
         initializeNewOmMetadataManager(temporaryFolder.newFolder()),
         temporaryFolder.newFolder());
 
-    Pipeline pipeline = getRandomPipeline();
-    pipelineID = pipeline.getId();
-
     ReconTestInjector reconTestInjector =
         new ReconTestInjector.Builder(temporaryFolder)
             .withReconSqlDb()
@@ -139,11 +140,17 @@
         reconTestInjector.getInstance(OzoneStorageContainerManager.class);
     reconContainerManager = (ReconContainerManager)
         ozoneStorageContainerManager.getContainerManager();
+    reconPipelineManager = (ReconPipelineManager)
+        ozoneStorageContainerManager.getPipelineManager();
     containerDbServiceProvider =
         reconTestInjector.getInstance(ContainerDBServiceProvider.class);
     containerEndpoint = reconTestInjector.getInstance(ContainerEndpoint.class);
     containerHealthSchemaManager =
         reconTestInjector.getInstance(ContainerHealthSchemaManager.class);
+
+    pipeline = getRandomPipeline();
+    pipelineID = pipeline.getId();
+    reconPipelineManager.addPipeline(pipeline);
   }
 
   @Before
@@ -153,8 +160,6 @@
       initializeInjector();
       isSetupDone = true;
     }
-    //Write Data to OM
-    Pipeline pipeline = getRandomPipeline();
 
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
     BlockID blockID1 = new BlockID(1, 101);
@@ -472,9 +477,8 @@
   void putContainerInfos(int num) throws IOException {
     for (int i = 1; i <= num; i++) {
       final ContainerInfo info = newContainerInfo(i);
-      reconContainerManager.getContainerStore().put(new ContainerID(i), info);
-      reconContainerManager.getContainerStateManager().addContainerInfo(
-          i, info, null, null);
+      reconContainerManager.addNewContainer(i,
+          new ContainerWithPipeline(info, pipeline));
     }
   }
 
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
index 0a3546a..0bfa179 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
@@ -52,7 +52,7 @@
     container = mock(ContainerInfo.class);
     when(container.getReplicationFactor())
         .thenReturn(HddsProtos.ReplicationFactor.THREE);
-    when(container.containerID()).thenReturn(new ContainerID(123456));
+    when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
     when(container.getContainerID()).thenReturn((long)123456);
     when(placementPolicy.validateContainerPlacement(
         Mockito.anyList(), Mockito.anyInt()))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index 2176a37..98dd593 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
 import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
@@ -72,7 +72,7 @@
     ReconStorageContainerManagerFacade scmMock =
         mock(ReconStorageContainerManagerFacade.class);
     MockPlacementPolicy placementMock = new MockPlacementPolicy();
-    ContainerManager containerManagerMock = mock(ContainerManager.class);
+    ContainerManagerV2 containerManagerMock = mock(ContainerManagerV2.class);
     ContainerReplica unhealthyReplicaMock = mock(ContainerReplica.class);
     when(unhealthyReplicaMock.getState()).thenReturn(State.UNHEALTHY);
     ContainerReplica healthyReplicaMock = mock(ContainerReplica.class);
@@ -87,19 +87,19 @@
       when(containerManagerMock.getContainer(c.containerID())).thenReturn(c);
     }
     // Under replicated
-    when(containerManagerMock.getContainerReplicas(new ContainerID(1L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L)))
         .thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY));
 
     // return one UNHEALTHY replica for container ID 2 -> Missing
-    when(containerManagerMock.getContainerReplicas(new ContainerID(2L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L)))
         .thenReturn(getMockReplicas(2L, State.UNHEALTHY));
 
     // return 0 replicas for container ID 3 -> Missing
-    when(containerManagerMock.getContainerReplicas(new ContainerID(3L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L)))
         .thenReturn(Collections.emptySet());
 
     // Return 5 Healthy -> Over replicated
-    when(containerManagerMock.getContainerReplicas(new ContainerID(4L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L)))
         .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED,
         State.CLOSED, State.CLOSED, State.CLOSED));
 
@@ -108,11 +108,11 @@
         State.CLOSED, State.CLOSED, State.CLOSED);
     placementMock.setMisRepWhenDnPresent(
         misReplicas.iterator().next().getDatanodeDetails().getUuid());
-    when(containerManagerMock.getContainerReplicas(new ContainerID(5L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L)))
         .thenReturn(misReplicas);
 
     // Return 3 Healthy -> Healthy container
-    when(containerManagerMock.getContainerReplicas(new ContainerID(6L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L)))
         .thenReturn(getMockReplicas(6L,
             State.CLOSED, State.CLOSED, State.CLOSED));
 
@@ -162,20 +162,20 @@
     // Now run the job again, to check that relevant records are updated or
     // removed as appropriate. Need to adjust the return value for all the mocks
     // Under replicated -> Delta goes from 2 to 1
-    when(containerManagerMock.getContainerReplicas(new ContainerID(1L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L)))
         .thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED));
 
     // ID 2 was missing - make it healthy now
-    when(containerManagerMock.getContainerReplicas(new ContainerID(2L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L)))
         .thenReturn(getMockReplicas(2L,
             State.CLOSED, State.CLOSED, State.CLOSED));
 
     // return 0 replicas for container ID 3 -> Still Missing
-    when(containerManagerMock.getContainerReplicas(new ContainerID(3L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L)))
         .thenReturn(Collections.emptySet());
 
     // Return 4 Healthy -> Delta changes from -2 to -1
-    when(containerManagerMock.getContainerReplicas(new ContainerID(4L)))
+    when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L)))
         .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED,
             State.CLOSED, State.CLOSED));
 
@@ -213,7 +213,7 @@
       replicas.add(ContainerReplica.newBuilder()
           .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
           .setContainerState(s)
-          .setContainerID(new ContainerID(containerId))
+          .setContainerID(ContainerID.valueOf(containerId))
           .setSequenceId(1)
           .build());
     }
@@ -227,7 +227,7 @@
       when(c.getContainerID()).thenReturn((long)i);
       when(c.getReplicationFactor())
           .thenReturn(HddsProtos.ReplicationFactor.THREE);
-      when(c.containerID()).thenReturn(new ContainerID(i));
+      when(c.containerID()).thenReturn(ContainerID.valueOf(i));
       containers.add(c);
     }
     return containers;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
index 4932253..74bbdf1 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
@@ -59,7 +59,7 @@
     container = mock(ContainerInfo.class);
     when(container.getReplicationFactor())
         .thenReturn(HddsProtos.ReplicationFactor.THREE);
-    when(container.containerID()).thenReturn(new ContainerID(123456));
+    when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
     when(container.getContainerID()).thenReturn((long)123456);
     when(placementPolicy.validateContainerPlacement(
         Mockito.anyList(), Mockito.anyInt()))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 377c356..b591bb6 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -25,6 +25,11 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHADBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -66,6 +71,9 @@
   private ReconPipelineManager pipelineManager;
   private ReconContainerManager containerManager;
   private DBStore store;
+  private SCMHAManager scmhaManager;
+  private SCMContext scmContext;
+  private SequenceIdGenerator sequenceIdGen;
 
   @Before
   public void setUp() throws Exception {
@@ -74,21 +82,33 @@
         temporaryFolder.newFolder().getAbsolutePath());
     conf.set(OZONE_SCM_NAMES, "localhost");
     store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
+    scmhaManager = MockSCMHAManager.getInstance(
+        true, new MockSCMHADBTransactionBuffer(store));
+    sequenceIdGen = new SequenceIdGenerator(
+        conf, scmhaManager, ReconSCMDBDefinition.SEQUENCE_ID.getTable(store));
+    scmContext = SCMContext.emptyContext();
     scmStorageConfig = new ReconStorageConfig(conf);
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
-    NodeManager nodeManager =
-        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
-    pipelineManager = new ReconPipelineManager(conf, nodeManager,
-        ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue);
+    NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig,
+        eventQueue, clusterMap, scmContext);
+    pipelineManager = ReconPipelineManager.newReconPipelineManager(
+        conf,
+        nodeManager,
+        ReconSCMDBDefinition.PIPELINES.getTable(store),
+        eventQueue,
+        scmhaManager,
+        scmContext);
     containerManager = new ReconContainerManager(
         conf,
-        ReconSCMDBDefinition.CONTAINERS.getTable(store),
         store,
+        ReconSCMDBDefinition.CONTAINERS.getTable(store),
         pipelineManager,
         getScmServiceProvider(),
         mock(ContainerHealthSchemaManager.class),
-        mock(ContainerDBServiceProvider.class));
+        mock(ContainerDBServiceProvider.class),
+        scmhaManager,
+        sequenceIdGen);
   }
 
   @After
@@ -115,7 +135,7 @@
     Pipeline pipeline = getRandomPipeline();
     getPipelineManager().addPipeline(pipeline);
 
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     ContainerInfo containerInfo =
         new ContainerInfo.Builder()
             .setContainerID(containerID.getId())
@@ -142,7 +162,7 @@
 
   protected ContainerWithPipeline getTestContainer(LifeCycleState state)
       throws IOException {
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     Pipeline pipeline = getRandomPipeline();
     pipelineManager.addPipeline(pipeline);
     ContainerInfo containerInfo =
@@ -161,7 +181,7 @@
   protected ContainerWithPipeline getTestContainer(long id,
                                                    LifeCycleState state)
       throws IOException {
-    ContainerID containerID = new ContainerID(id);
+    ContainerID containerID = ContainerID.valueOf(id);
     Pipeline pipeline = getRandomPipeline();
     pipelineManager.addPipeline(pipeline);
     ContainerInfo containerInfo =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
index 1fe32d1..4da43d8 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
@@ -22,6 +22,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -40,7 +41,8 @@
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -49,7 +51,6 @@
  */
 public class TestReconContainerManager
     extends AbstractReconContainerManagerTest {
-
   @Test
   public void testAddNewOpenContainer() throws IOException {
     ContainerWithPipeline containerWithPipeline =
@@ -59,13 +60,13 @@
     ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
 
     ReconContainerManager containerManager = getContainerManager();
-    assertFalse(containerManager.exists(containerID));
+    assertFalse(containerManager.containerExist(containerID));
     assertFalse(getContainerTable().isExist(containerID));
 
     containerManager.addNewContainer(
         containerID.getId(), containerWithPipeline);
 
-    assertTrue(containerManager.exists(containerID));
+    assertTrue(containerManager.containerExist(containerID));
 
     List<ContainerInfo> containers =
         containerManager.getContainers(LifeCycleState.OPEN);
@@ -78,6 +79,8 @@
     assertEquals(containerID, containersInPipeline.first());
 
     // Verify container DB.
+    SCMHAManager scmhaManager = containerManager.getSCMHAManager();
+    scmhaManager.getDBTransactionBuffer().close();
     assertTrue(getContainerTable().isExist(containerID));
   }
 
@@ -89,35 +92,37 @@
     ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
 
     ReconContainerManager containerManager = getContainerManager();
-    assertFalse(containerManager.exists(containerID));
+    assertFalse(containerManager.containerExist(containerID));
     assertFalse(getContainerTable().isExist(containerID));
 
     containerManager.addNewContainer(
         containerID.getId(), containerWithPipeline);
 
-    assertTrue(containerManager.exists(containerID));
+    assertTrue(containerManager.containerExist(containerID));
 
     List<ContainerInfo> containers = containerManager.getContainers(CLOSED);
     assertEquals(1, containers.size());
     assertEquals(containerInfo, containers.get(0));
     // Verify container DB.
+    SCMHAManager scmhaManager = containerManager.getSCMHAManager();
+    scmhaManager.getDBTransactionBuffer().close();
     assertTrue(getContainerTable().isExist(containerID));
   }
 
   @Test
   public void testCheckAndAddNewContainer() throws IOException {
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     ReconContainerManager containerManager = getContainerManager();
-    assertFalse(containerManager.exists(containerID));
+    assertFalse(containerManager.containerExist(containerID));
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
     containerManager.checkAndAddNewContainer(containerID,
         OPEN, datanodeDetails);
-    assertTrue(containerManager.exists(containerID));
+    assertTrue(containerManager.containerExist(containerID));
 
     // Doing it one more time should not change any state.
     containerManager.checkAndAddNewContainer(containerID, OPEN,
         datanodeDetails);
-    assertTrue(containerManager.exists(containerID));
+    assertTrue(containerManager.containerExist(containerID));
     assertEquals(LifeCycleState.OPEN,
         getContainerManager().getContainer(containerID).getState());
   }
@@ -146,7 +151,7 @@
         getContainerManager().getContainer(containerID).getState());
   }
 
-  ContainerInfo newContainerInfo(long containerId) {
+  ContainerInfo newContainerInfo(long containerId, Pipeline pipeline) {
     return new ContainerInfo.Builder()
         .setContainerID(containerId)
         .setReplicationType(HddsProtos.ReplicationType.RATIS)
@@ -154,27 +159,17 @@
         .setOwner("owner2")
         .setNumberOfKeys(99L)
         .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
-        .setPipelineID(PipelineID.randomId())
+        .setPipelineID(pipeline.getId())
         .build();
   }
 
-  void putContainerInfos(ReconContainerManager containerManager, int num)
-      throws IOException {
-    for (int i = 1; i <= num; i++) {
-      final ContainerInfo info = newContainerInfo(i);
-      containerManager.getContainerStore().put(new ContainerID(i), info);
-      containerManager.getContainerStateManager()
-          .addContainerInfo(i, info, null, null);
-    }
-  }
-
   @Test
   public void testUpdateAndRemoveContainerReplica() throws IOException {
     // Sanity checking updateContainerReplica and ContainerReplicaHistory
 
     // Init Container 1
     final long cIDlong1 = 1L;
-    final ContainerID containerID1 = new ContainerID(cIDlong1);
+    final ContainerID containerID1 = ContainerID.valueOf(cIDlong1);
 
     // Init DN01
     final UUID uuid1 = UUID.randomUUID();
@@ -191,7 +186,14 @@
     Assert.assertEquals(0, repHistMap.size());
 
     // Put a replica info and call updateContainerReplica
-    putContainerInfos(containerManager, 10);
+    Pipeline pipeline = getRandomPipeline();
+    getPipelineManager().addPipeline(pipeline);
+    for (int i = 1; i <= 10; i++) {
+      final ContainerInfo info = newContainerInfo(i, pipeline);
+      containerManager.addNewContainer(i,
+          new ContainerWithPipeline(info, pipeline));
+    }
+
     containerManager.updateContainerReplica(containerID1, containerReplica1);
     // Should have 1 container entry in the replica history map
     Assert.assertEquals(1, repHistMap.size());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 1b42f21..54f8220 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -62,7 +63,7 @@
   @Test
   public void testProcessICR() throws IOException, NodeNotFoundException {
 
-    ContainerID containerID = new ContainerID(100L);
+    ContainerID containerID = ContainerID.valueOf(100L);
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
     IncrementalContainerReportFromDatanode reportMock =
         mock(IncrementalContainerReportFromDatanode.class);
@@ -81,19 +82,19 @@
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
     SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
-    NodeManager nodeManager =
-        new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
+    NodeManager nodeManager = new SCMNodeManager(conf, storageConfig,
+        eventQueue, clusterMap, SCMContext.emptyContext());
     nodeManager.register(datanodeDetails, null, null);
 
     ReconContainerManager containerManager = getContainerManager();
     ReconIncrementalContainerReportHandler reconIcr =
         new ReconIncrementalContainerReportHandler(nodeManager,
-            containerManager);
+            containerManager, SCMContext.emptyContext());
     EventPublisher eventPublisherMock = mock(EventPublisher.class);
 
     reconIcr.onMessage(reportMock, eventPublisherMock);
     nodeManager.addContainer(datanodeDetails, containerID);
-    assertTrue(containerManager.exists(containerID));
+    assertTrue(containerManager.containerExist(containerID));
     assertEquals(1, containerManager.getContainerReplicas(containerID).size());
     assertEquals(OPEN, containerManager.getContainer(containerID).getState());
   }
@@ -130,10 +131,10 @@
       when(reportMock.getReport()).thenReturn(containerReport);
       ReconIncrementalContainerReportHandler reconIcr =
           new ReconIncrementalContainerReportHandler(nodeManagerMock,
-              containerManager);
+              containerManager, SCMContext.emptyContext());
 
       reconIcr.onMessage(reportMock, mock(EventPublisher.class));
-      assertTrue(containerManager.exists(containerID));
+      assertTrue(containerManager.containerExist(containerID));
       assertEquals(1,
           containerManager.getContainerReplicas(containerID).size());
       LifeCycleState expectedState = getContainerStateFromReplicaState(state);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index b190810..9cbf2d2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -26,6 +26,10 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHADBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -65,6 +69,8 @@
   private OzoneConfiguration conf;
   private SCMStorageConfig scmStorageConfig;
   private DBStore store;
+  private SCMHAManager scmhaManager;
+  private SCMContext scmContext;
 
   @Before
   public void setup() throws IOException {
@@ -74,6 +80,9 @@
     conf.set(OZONE_SCM_NAMES, "localhost");
     scmStorageConfig = new ReconStorageConfig(conf);
     store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
+    scmhaManager = MockSCMHAManager.getInstance(
+        true, new MockSCMHADBTransactionBuffer(store));
+    scmContext = SCMContext.emptyContext();
   }
 
   @After
@@ -109,12 +118,17 @@
 
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
-    NodeManager nodeManager =
-        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+    NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig,
+        eventQueue, clusterMap, SCMContext.emptyContext());
 
     try (ReconPipelineManager reconPipelineManager =
-        new ReconPipelineManager(conf, nodeManager,
-            ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue)) {
+             ReconPipelineManager.newReconPipelineManager(
+                 conf,
+                 nodeManager,
+                 ReconSCMDBDefinition.PIPELINES.getTable(store),
+                 eventQueue,
+                 scmhaManager,
+                 scmContext)) {
       reconPipelineManager.addPipeline(validPipeline);
       reconPipelineManager.addPipeline(invalidPipeline);
 
@@ -145,12 +159,18 @@
     Pipeline pipeline = getRandomPipeline();
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
     EventQueue eventQueue = new EventQueue();
-    NodeManager nodeManager =
-        new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+    NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig,
+        eventQueue, clusterMap, SCMContext.emptyContext());
 
     ReconPipelineManager reconPipelineManager =
-        new ReconPipelineManager(conf, nodeManager,
-            ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue);
+        ReconPipelineManager.newReconPipelineManager(
+            conf,
+            nodeManager,
+            ReconSCMDBDefinition.PIPELINES.getTable(store),
+            eventQueue,
+            scmhaManager,
+            scmContext);
+
     assertFalse(reconPipelineManager.containsPipeline(pipeline.getId()));
     reconPipelineManager.addPipeline(pipeline);
     assertTrue(reconPipelineManager.containsPipeline(pipeline.getId()));
@@ -161,9 +181,15 @@
 
     NodeManager nodeManagerMock = mock(NodeManager.class);
 
-    ReconPipelineManager reconPipelineManager = new ReconPipelineManager(
-        conf, nodeManagerMock, ReconSCMDBDefinition.PIPELINES.getTable(store),
-        new EventQueue());
+    ReconPipelineManager reconPipelineManager =
+        ReconPipelineManager.newReconPipelineManager(
+            conf,
+            nodeManagerMock,
+            ReconSCMDBDefinition.PIPELINES.getTable(store),
+            new EventQueue(),
+            scmhaManager,
+            scmContext);
+
     PipelineFactory pipelineFactory = reconPipelineManager.getPipelineFactory();
     assertTrue(pipelineFactory instanceof ReconPipelineFactory);
     ReconPipelineFactory reconPipelineFactory =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
index e72df36..fcf67a4 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -62,7 +63,8 @@
 
     ReconPipelineReportHandler handler =
         new ReconPipelineReportHandler(new ReconSafeModeManager(),
-            reconPipelineManagerMock, configuration, scmServiceProviderMock);
+            reconPipelineManagerMock, SCMContext.emptyContext(),
+            configuration, scmServiceProviderMock);
 
     EventPublisher eventPublisherMock = mock(EventPublisher.class);
     PipelineReport report = mock(PipelineReport.class);
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
index 31b0891..28ab6fe 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.security.SecurityUtil;
 
 import javax.annotation.Nonnull;
@@ -59,7 +60,7 @@
     int counter = 0;
     for (String nodeId : omNodeIds) {
       counter++;
-      String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
+      String rpcAddrKey = ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
           serviceId, nodeId);
       String rpcAddrStr = OmUtils.getOmRpcAddress(configuration, rpcAddrKey);
       if (rpcAddrStr == null || rpcAddrStr.isEmpty()) {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
index 8892a97..e63aa1e 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -58,11 +59,14 @@
     Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
         serviceID);
 
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    configuration.set(
+        ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
         serviceID, "om1"), "om1:9862");
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    configuration.set(
+        ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
         serviceID, "om2"), "om2:9862");
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    configuration.set(
+        ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
         serviceID, "om3"), "om3:9862");
 
     String expectedOmServiceAddress = buildServiceAddress(nodeIDList);
@@ -83,9 +87,11 @@
 
     // Don't set om3 node rpc address. Here we are skipping setting of one of
     // the OM address. So buildServiceNameForToken will fail.
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    configuration.set(
+        ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
         serviceID, "om1"), "om1:9862");
-    configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    configuration.set(
+        ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
         serviceID, "om2"), "om2:9862");
 
 
@@ -114,7 +120,7 @@
     int counter = 0;
     for (String nodeID : nodeIDList) {
       counter++;
-      String addr = configuration.get(OmUtils.addKeySuffixes(
+      String addr = configuration.get(ConfUtils.addKeySuffixes(
           OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, nodeID));
 
       if (counter != nodesLength) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
new file mode 100644
index 0000000..c784c44
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import picocli.CommandLine;
+
+/**
+ * Handler of scm status command.
+ */
+@CommandLine.Command(
+    name = "roles",
+    description = "List all SCMs and their respective Ratis server roles",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class)
+public class GetScmRatisRolesSubcommand extends ScmSubcommand {
+
+  @CommandLine.ParentCommand
+  private ScmAdmin parent;
+
+  @Override
+  protected void execute(ScmClient scmClient) throws IOException {
+    List<String> roles = scmClient.getScmRatisRoles();
+    System.out.println(roles);
+  }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
new file mode 100644
index 0000000..d745a6a
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import org.apache.hadoop.hdds.cli.GenericCli;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.cli.OzoneAdmin;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+import picocli.CommandLine.Model.CommandSpec;
+import picocli.CommandLine.Spec;
+
+/**
+ * Subcommand for admin operations related to SCM.
+ */
+@CommandLine.Command(
+    name = "scm",
+    description = "Ozone Storage Container Manager specific admin operations",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class,
+    subcommands = {
+        GetScmRatisRolesSubcommand.class
+    })
+@MetaInfServices(SubcommandWithParent.class)
+public class ScmAdmin extends GenericCli  implements SubcommandWithParent {
+
+  @CommandLine.ParentCommand
+  private OzoneAdmin parent;
+
+  @Spec
+  private CommandSpec spec;
+
+  public OzoneAdmin getParent() {
+    return parent;
+  }
+
+  @Override
+  public Void call() throws Exception {
+    GenericCli.missingSubcommand(spec);
+    return null;
+  }
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneAdmin.class;
+  }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java
similarity index 81%
copy from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
copy to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java
index 4944017..ec15a33 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java
@@ -6,17 +6,17 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdds.scm.ratis;
 
 /**
- * This package contains classes related to Apache Ratis for SCM.
+ * SCM related Admin tools.
  */
+package org.apache.hadoop.ozone.admin.scm;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DatanodeLayout.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DatanodeLayout.java
new file mode 100644
index 0000000..2bb121d
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DatanodeLayout.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.debug;
+
+import org.apache.hadoop.hdds.cli.GenericCli;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine.Spec;
+import picocli.CommandLine.Model.CommandSpec;
+
+import picocli.CommandLine;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+
+/**
+ * Tool to upgrade Datanode layout.
+ */
+@CommandLine.Command(
+    name = "dnlayout",
+    description = "Shell of updating datanode layout format",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true)
+@MetaInfServices(SubcommandWithParent.class)
+public class DatanodeLayout extends GenericCli
+    implements Callable<Void>, SubcommandWithParent{
+
+  @CommandLine.Option(names = {"--path"},
+      description = "File Path")
+  private String storagePath;
+
+  @CommandLine.Option(names = {"--verify"},
+      hidden = true,
+      description = "Verify that the datanode layout is correct")
+  private boolean verify;
+
+  @Spec
+  private CommandSpec spec;
+
+  @Override
+  public Void call() throws Exception {
+    OzoneConfiguration conf = createOzoneConfiguration();
+
+    runUpgrade(conf, storagePath, verify);
+    return null;
+  }
+
+  public static void main(String[] args) {
+    new DatanodeLayout().run(args);
+  }
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  public static List<HddsVolume> runUpgrade(OzoneConfiguration conf,
+        String storagePath, boolean verify) throws Exception {
+    if (storagePath != null) {
+      conf.unset(HDDS_DATANODE_DIR_KEY);
+      conf.set(HDDS_DATANODE_DIR_KEY, storagePath);
+    }
+
+    if (verify) {
+      conf.setBoolean(
+          ScmConfigKeys.HDDS_DATANODE_UPGRADE_LAYOUT_INLINE, false);
+    }
+
+    MutableVolumeSet volumeSet = new MutableVolumeSet(conf);
+    ContainerSet containerSet = new ContainerSet();
+    OzoneContainer.buildContainerSet(volumeSet, containerSet, conf);
+    volumeSet.shutdown();
+
+    if (verify) {
+      for (HddsVolume vol : volumeSet.getFailedVolumesList()) {
+        System.out.println("Failed Volume:" + vol.getHddsRootDir());
+      }
+    }
+    return volumeSet.getFailedVolumesList();
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ExportContainer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ExportContainer.java
index 999df478..dab4c9c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ExportContainer.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ExportContainer.java
@@ -96,7 +96,7 @@
 
     String datanodeUuid = getDatanodeUUID(firstStorageDir, conf);
 
-    String scmId = getScmId(firstStorageDir);
+    String clusterId = getClusterId(firstStorageDir);
 
     MutableVolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf);
 
@@ -113,7 +113,7 @@
               metrics,
               containerReplicaProto -> {
               });
-      handler.setScmID(scmId);
+      handler.setClusterID(clusterId);
       handlers.put(containerType, handler);
     }
 
@@ -151,7 +151,7 @@
     return null;
   }
 
-  public String getScmId(String storageDir) throws IOException {
+  public String getClusterId(String storageDir) throws IOException {
     Preconditions.checkNotNull(storageDir);
     final Path firstStorageDirPath = Files.list(Paths.get(storageDir, "hdds"))
         .filter(Files::isDirectory)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index cd0663c..3e88148 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -18,7 +18,6 @@
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.net.InetSocketAddress;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
@@ -34,12 +33,10 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
@@ -60,7 +57,6 @@
 import io.opentracing.util.GlobalTracer;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY;
 import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
@@ -346,24 +342,13 @@
   }
 
   public StorageContainerLocationProtocol createStorageContainerLocationClient(
-      OzoneConfiguration ozoneConf)
-      throws IOException {
-
-    long version = RPC.getProtocolVersion(
-        StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(ozoneConf);
-
-    RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
+      OzoneConfiguration ozoneConf) {
+    SCMContainerLocationFailoverProxyProvider proxyProvider =
+        new SCMContainerLocationFailoverProxyProvider(ozoneConf);
     StorageContainerLocationProtocol client =
         TracingUtil.createProxy(
             new StorageContainerLocationProtocolClientSideTranslatorPB(
-                RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
-                    scmAddress, UserGroupInformation.getCurrentUser(),
-                    ozoneConf,
-                    NetUtils.getDefaultSocketFactory(ozoneConf),
-                    Client.getRpcTimeout(ozoneConf))),
+                proxyProvider),
             StorageContainerLocationProtocol.class, ozoneConf);
     return client;
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
index ad2810a..6f03af1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java
@@ -186,7 +186,7 @@
               metrics,
               containerReplicaProto -> {
               });
-      handler.setScmID(UUID.randomUUID().toString());
+      handler.setClusterID(UUID.randomUUID().toString());
       handlers.put(containerType, handler);
     }
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 31c8736..7eced3c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -113,7 +113,7 @@
           containerType, conf, "datanodeid",
           containerSet, volumeSet, metrics,
           c -> {});
-      handler.setScmID("scm");
+      handler.setClusterID("scm");
       handlers.put(containerType, handler);
     }
     dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
index bc2a1ea..7071a6f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
@@ -132,7 +132,7 @@
       // writes the version file properties
       scmStore.initialize();
     }
-    return new StorageContainerManager(conf, configurator);
+    return StorageContainerManager.createSCM(conf, configurator);
   }
 
   static void configureSCM(OzoneConfiguration conf, int numHandlers) {