Merge branch 'trunk' into HDFS-7240
diff --git a/LICENSE.txt b/LICENSE.txt
index 447c609..75c5562 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -693,6 +693,73 @@
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 
+
+For:
+./hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/nvd3-1.8.5.* (css and js files)
+--------------------------------------------------------------------------------
+Copyright (c) 2011-2014 Novus Partners, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
+file except in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software distributed under the
+License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+express or implied. See the License for the specific language governing permissions and
+limitations under the License.
+
+
+
+For:
+hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/angular-nvd3-1.0.9.min.js
+--------------------------------------------------------------------------------
+The MIT License (MIT)
+Copyright (c) 2014 Konstantin Skipor
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+and associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
+LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+For:
+hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/angular-1.6.4.min.js
+hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/angular-route-1.6.4.min.js
+--------------------------------------------------------------------------------
+The MIT License
+
+Copyright (c) 2010-2017 Google, Inc. http://angularjs.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+
+
 For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
@@ -793,6 +860,7 @@
 
 For:
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js
+hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/d3-3.5.17.min.js
 --------------------------------------------------------------------------------
 
 D3 is available under a 3-clause BSD license. For details, see:
diff --git a/dev-support/bin/dist-layout-stitching b/dev-support/bin/dist-layout-stitching
index f3db542..6557161 100755
--- a/dev-support/bin/dist-layout-stitching
+++ b/dev-support/bin/dist-layout-stitching
@@ -21,6 +21,9 @@
 # project.build.directory
 BASEDIR=$2
 
+#hdds.version
+HDDS_VERSION=$3
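+# (These positional arguments are supplied by the exec-maven-plugin invocation in
+# hadoop-dist/pom.xml: project.version, project.build.directory and hdds.version.)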
+
 function run()
 {
   declare res
@@ -132,7 +135,6 @@
 run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-rbf/target/hadoop-hdfs-rbf-${VERSION}" .
 run copy "${ROOT}/hadoop-yarn-project/target/hadoop-yarn-project-${VERSION}" .
 run copy "${ROOT}/hadoop-mapreduce-project/target/hadoop-mapreduce-${VERSION}" .
-run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" .
 
 #copy httpfs and kms as is
 run cp -pr "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${VERSION}"/* .
@@ -144,6 +146,24 @@
 run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-${VERSION}.jar" share/hadoop/client/
 run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-${VERSION}.jar" share/hadoop/client/
 
+# HDDS
+run copy "${ROOT}/hadoop-hdds/common/target/hadoop-hdds-common-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/framework/target/hadoop-hdds-server-framework-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/server-scm/target/hadoop-hdds-server-scm-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/container-service/target/hadoop-hdds-container-service-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/client/target/hadoop-hdds-client-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-hdds/tools/target/hadoop-hdds-tools-${HDDS_VERSION}" .
+
+# Ozone
+run copy "${ROOT}/hadoop-ozone/common/target/hadoop-ozone-common-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/ozone-manager/target/hadoop-ozone-ozone-manager-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/client/target/hadoop-ozone-client-${HDDS_VERSION}" .
+run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}" .
+
+run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" .
+
+
 echo
 echo "Hadoop dist layout available at: ${BASEDIR}/hadoop-${VERSION}"
 echo
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index d6bf779..369c606 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -198,6 +198,9 @@
 RUN echo 'forrest.home=/opt/apache-forrest' > build.properties
 ENV FORREST_HOME=/opt/apache-forrest
 
+# Hugo static website generator (for new hadoop site and Ozone docs)
+RUN curl -L -o hugo.deb https://github.com/gohugoio/hugo/releases/download/v0.30.2/hugo_0.30.2_Linux-64bit.deb && dpkg --install hugo.deb && rm hugo.deb
+
 # Add a welcome message and environment checks.
 ADD hadoop_env_checks.sh /root/hadoop_env_checks.sh
 RUN chmod 755 /root/hadoop_env_checks.sh
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml
new file mode 100644
index 0000000..b1e039f
--- /dev/null
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml
@@ -0,0 +1,56 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
+  <id>hadoop-src</id>
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>true</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>.</directory>
+      <includes>
+        <include>LICENCE.txt</include>
+        <include>README.txt</include>
+        <include>NOTICE.txt</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>.</directory>
+      <useDefaultExcludes>true</useDefaultExcludes>
+      <excludes>
+        <exclude>.git/**</exclude>
+        <exclude>**/.gitignore</exclude>
+        <exclude>**/.svn</exclude>
+        <exclude>**/*.iws</exclude>
+        <exclude>**/*.ipr</exclude>
+        <exclude>**/*.iml</exclude>
+        <exclude>**/.classpath</exclude>
+        <exclude>**/.project</exclude>
+        <exclude>**/.settings</exclude>
+        <exclude>**/target/**</exclude>
+        <!-- until the code that does this is fixed -->
+        <exclude>**/*.log</exclude>
+        <exclude>**/build/**</exclude>
+        <exclude>**/file:/**</exclude>
+        <exclude>**/SecurityAuth.audit*</exclude>
+      </excludes>
+    </fileSet>
+  </fileSets>
+</assembly>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index b1e039f..f0a8d44 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -50,6 +50,8 @@
         <exclude>**/build/**</exclude>
         <exclude>**/file:/**</exclude>
         <exclude>**/SecurityAuth.audit*</exclude>
+        <exclude>hadoop-ozone/**</exclude>
+        <exclude>hadoop-hdds/**</exclude>
       </excludes>
     </fileSet>
   </fileSets>
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh b/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
index fb9f4f9..f8c6a15 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/src/test/resources/ensure-jars-have-correct-contents.sh
@@ -43,6 +43,12 @@
 #   * Used by JavaSandboxLinuxContainerRuntime as a default, loaded
 #     from root, so can't relocate. :(
 allowed_expr+="|^java.policy$"
+# * Allow native libraries from rocksdb; leave the native libraries as they are.
+allowed_expr+="|^librocksdbjni-linux32.so"
+allowed_expr+="|^librocksdbjni-linux64.so"
+allowed_expr+="|^librocksdbjni-osx.jnilib"
+allowed_expr+="|^librocksdbjni-win64.dll"
+allowed_expr+="|^librocksdbjni-linux-ppc64le.so"
 
 
 allowed_expr+=")"
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index a443648..da519a3 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -647,6 +647,13 @@
                         <exclude>xml.xsd</exclude>
                       </excludes>
                     </filter>
+                    <!-- filtering HISTORY-JAVA.md from rocksdb jar -->
+                    <filter>
+                      <artifact>org.rocksdb:rocksdbjni</artifact>
+                      <excludes>
+                        <exclude>HISTORY-JAVA.md</exclude>
+                      </excludes>
+                    </filter>
                     <filter>
                       <!-- skip jetty license info already incorporated into LICENSE/NOTICE -->
                       <artifact>org.eclipse.jetty:*</artifact>
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 363adf5..532fae9 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -157,6 +157,7 @@
                       <!-- Leave javax APIs that are stable -->
                       <!-- the jdk ships part of the javax.annotation namespace, so if we want to relocate this we'll have to care it out by class :( -->
                       <exclude>com.google.code.findbugs:jsr305</exclude>
+                      <exclude>io.dropwizard.metrics:metrics-core</exclude>
                     </excludes>
                   </artifactSet>
                   <filters>
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 9ef48b6..bee1430 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -596,6 +596,11 @@
   YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
   MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
   MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
+  HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"}
+  HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"}
+  OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"}
+  OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"}
+
   HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
   HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
   HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 24aacdf..6573a81 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -402,7 +402,24 @@
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # export HDFS_DFSROUTER_OPTS=""
+
 ###
+# HDFS Key Space Manager specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Key Space Manager.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_KSM_OPTS=""
+
+###
+# HDFS StorageContainerManager specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Storage Container Manager.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_STORAGECONTAINERMANAGER_OPTS=""
 
 ###
 # Advanced Users Only!
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 6933cf4..5783013 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -293,6 +293,40 @@
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 
+
+# Fair scheduler requests log on state dump
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=false
+log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
+log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
+log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout
+log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+#
+# Add a logger for ozone that is separate from the Datanode.
+#
+log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
+
+# Do not log into the datanode logs. Remove this line to have a single log.
+log4j.additivity.org.apache.hadoop.ozone=false
+
+# For development purposes, log both to console and log file.
+log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
+log4j.appender.OZONE.Threshold=info
+log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
+log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+ %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
+
+# Real ozone logger that writes to ozone.log
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
+log4j.appender.FILE.Threshold=debug
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
+%m%n
 #
 # Fair scheduler state dump
 #
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index f32268b..fd72618 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1685,6 +1685,19 @@
 </property>
 
 
+<!-- Ozone file system properties -->
+<property>
+  <name>fs.o3.impl</name>
+  <value>org.apache.hadoop.fs.ozone.OzoneFileSystem</value>
+  <description>The implementation class of the Ozone FileSystem.</description>
+</property>
+
+<property>
+  <name>fs.AbstractFileSystem.o3.impl</name>
+  <value>org.apache.hadoop.fs.ozone.OzFs</value>
+  <description>The implementation class of the OzFs AbstractFileSystem.</description>
+</property>
+
 <!-- ipc properties -->
 
 <property>
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 6ca9c78..023c831 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -101,6 +101,9 @@
     // S3A properties are in a different subtree.
     xmlPrefixToSkipCompare.add("fs.s3a.");
 
+    // O3 properties are in a different subtree.
+    xmlPrefixToSkipCompare.add("fs.o3.");
+
     //ftp properties are in a different subtree.
     // - org.apache.hadoop.fs.ftp.FTPFileSystem.
     xmlPrefixToSkipCompare.add("fs.ftp.impl");
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 8a0453f..f9b8573 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -68,6 +68,8 @@
       <artifactId>hadoop-client-integration-tests</artifactId>
       <scope>provided</scope>
     </dependency>
+
+
   </dependencies>
 
   <build>
@@ -132,6 +134,7 @@
       <!-- Disable the sign plugin, since there isn't anything to sign -->
       <plugin>
         <artifactId>maven-gpg-plugin</artifactId>
+        <version>${maven-gpg-plugin.version}</version>
         <executions>
           <execution>
             <id>sign-artifacts</id>
@@ -171,6 +174,7 @@
                     <argument>${basedir}/../dev-support/bin/dist-layout-stitching</argument>
                     <argument>${project.version}</argument>
                     <argument>${project.build.directory}</argument>
+                    <argument>${hdds.version}</argument>
                   </arguments>
                 </configuration>
               </execution>
@@ -214,6 +218,85 @@
         </plugins>
       </build>
     </profile>
+
+    <profile>
+      <id>hdds</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <dependencies>
+
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-ozone-ozone-manager</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdds-server-scm</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdds-tools</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdds-container-service</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-ozone-objectstore-service</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdds-tools</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-ozone-tools</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-resources-plugin</artifactId>
+              <executions>
+                <execution>
+                  <id>copy-docker-compose</id>
+                  <goals>
+                    <goal>copy-resources</goal>
+                  </goals>
+                  <phase>prepare-package</phase>
+                  <configuration>
+                    <outputDirectory>${project.build.directory}/compose</outputDirectory>
+                    <resources>
+                      <resource>
+                        <directory>src/main/compose</directory>
+                        <filtering>true</filtering>
+                      </resource>
+                    </resources>
+                  </configuration>
+                </execution>
+                <execution>
+                  <id>copy-dockerfile</id>
+                  <goals>
+                    <goal>copy-resources</goal>
+                  </goals>
+                  <phase>prepare-package</phase>
+                  <configuration>
+                    <outputDirectory>${project.build.directory}</outputDirectory>
+                    <resources>
+                      <resource>
+                        <directory>src/main/docker</directory>
+                        <filtering>true</filtering>
+                      </resource>
+                    </resources>
+                  </configuration>
+                </execution>
+              </executions>
+            </plugin>
+        </plugins>
+      </build>
+    </profile>
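+    <!--
+      Note: the hdds profile above is not active by default. A dist build that
+      also packages the HDDS/Ozone artifacts and the compose files is expected
+      to be run with -Phdds in addition to the usual dist options, for example
+      "mvn package -Pdist,hdds -DskipTests -Dtar" (the non-hdds flags are the
+      usual Hadoop dist options and are shown only as an example).
+    -->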
   </profiles>
 
 </project>
diff --git a/hadoop-dist/src/main/compose/ozone/.env b/hadoop-dist/src/main/compose/ozone/.env
new file mode 100644
index 0000000..af20d3e
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozone/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+VERSION=${project.version}
\ No newline at end of file
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
new file mode 100644
index 0000000..13a7db6
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   namenode:
+      image: apache/hadoop-runner
+      hostname: namenode
+      volumes:
+         - ../..//hadoop-${VERSION}:/opt/hadoop
+      ports:
+         - 9870:9870
+      environment:
+          ENSURE_NAMENODE_DIR: /data/namenode
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/hdfs","namenode"]
+   datanode:
+      image: apache/hadoop-runner
+      volumes:
+        - ../..//hadoop-${VERSION}:/opt/hadoop
+      ports:
+        - 9864
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+      env_file:
+        - ./docker-config
+   ksm:
+      image: apache/hadoop-runner
+      volumes:
+         - ../..//hadoop-${VERSION}:/opt/hadoop
+      ports:
+         - 9874:9874
+      environment:
+         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+      env_file:
+          - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","ksm"]
+   scm:
+      image: apache/hadoop-runner
+      volumes:
+         - ../..//hadoop-${VERSION}:/opt/hadoop
+      ports:
+         - 9876:9876
+      env_file:
+          - ./docker-config
+      environment:
+          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+      command: ["/opt/hadoop/bin/ozone","scm"]
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
new file mode 100644
index 0000000..c693db0
--- /dev/null
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
+OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
+HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
+HDFS-SITE.XML_rpc.metrics.quantile.enable=true
+HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
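+
+# Each "<FILENAME>_<property>=<value>" entry above is expected to be rewritten
+# into the corresponding configuration file (core-site.xml, ozone-site.xml,
+# hdfs-site.xml, log4j.properties) by the apache/hadoop-runner image before the
+# daemons start; that translation is implemented in the runner image itself.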
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
new file mode 100644
index 0000000..d2efec4
--- /dev/null
+++ b/hadoop-hdds/client/pom.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdds</artifactId>
+    <version>0.2.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-hdds-client</artifactId>
+  <version>0.2.1-SNAPSHOT</version>
+  <description>Apache Hadoop Distributed Data Store Client libraries</description>
+  <name>Apache HDDS Client</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.component>hdds</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-all</artifactId>
+    </dependency>
+
+  </dependencies>
+</project>
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
new file mode 100644
index 0000000..5c702c6
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import io.netty.bootstrap.Bootstrap;
+import io.netty.channel.Channel;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.nio.NioSocketChannel;
+import io.netty.handler.logging.LogLevel;
+import io.netty.handler.logging.LoggingHandler;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+
+/**
+ * A client for the storage container protocol.
+ */
+public class XceiverClient extends XceiverClientSpi {
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class);
+  private final Pipeline pipeline;
+  private final Configuration config;
+  private Channel channel;
+  private Bootstrap b;
+  private EventLoopGroup group;
+  private final Semaphore semaphore;
+
+  /**
+   * Constructs a client that can communicate with the Container framework on
+   * data nodes.
+   *
+   * @param pipeline - Pipeline that defines the machines.
+   * @param config -- Ozone Config
+   */
+  public XceiverClient(Pipeline pipeline, Configuration config) {
+    super();
+    Preconditions.checkNotNull(pipeline);
+    Preconditions.checkNotNull(config);
+    this.pipeline = pipeline;
+    this.config = config;
+    this.semaphore =
+        new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config));
+  }
+
+  @Override
+  public void connect() throws Exception {
+    if (channel != null && channel.isActive()) {
+      throw new IOException("This client is already connected to a host.");
+    }
+
+    group = new NioEventLoopGroup();
+    b = new Bootstrap();
+    b.group(group)
+        .channel(NioSocketChannel.class)
+        .handler(new LoggingHandler(LogLevel.INFO))
+        .handler(new XceiverClientInitializer(this.pipeline, semaphore));
+    DatanodeDetails leader = this.pipeline.getLeader();
+
+    // Read the port from the datanode; if it is not set, fall back to the
+    // configured default port.
+    int port = leader.getContainerPort();
+    if (port == 0) {
+      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+    }
+    LOG.debug("Connecting to server Port : " + port);
+    channel = b.connect(leader.getHostName(), port).sync().channel();
+  }
+
+  /**
+   * Returns whether the xceiver client is connected to a server.
+   *
+   * @return True if the connection is alive, false otherwise.
+   */
+  @VisibleForTesting
+  public boolean isConnected() {
+    return channel.isActive();
+  }
+
+  @Override
+  public void close() {
+    if (group != null) {
+      group.shutdownGracefully().awaitUninterruptibly();
+    }
+  }
+
+  @Override
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  @Override
+  public ContainerProtos.ContainerCommandResponseProto sendCommand(
+      ContainerProtos.ContainerCommandRequestProto request) throws IOException {
+    try {
+      if ((channel == null) || (!channel.isActive())) {
+        throw new IOException("This channel is not connected.");
+      }
+      XceiverClientHandler handler =
+          channel.pipeline().get(XceiverClientHandler.class);
+
+      return handler.sendCommand(request);
+    } catch (ExecutionException | InterruptedException e) {
+      /**
+       * In case the netty channel handler throws an exception,
+       * the exception thrown will be wrapped within {@link ExecutionException}.
+       * Unwrapping here so that the original exception gets passed
+       * to the client.
+       */
+      if (e instanceof ExecutionException) {
+        Throwable cause = e.getCause();
+        if (cause instanceof IOException) {
+          throw (IOException) cause;
+        }
+      }
+      throw new IOException(
+          "Unexpected exception during execution:" + e.getMessage());
+    }
+  }
+
+  /**
+   * Sends a given command to the server and gets a waitable future back.
+   *
+   * @param request Request
+   * @return Response to the command
+   * @throws IOException
+   */
+  @Override
+  public CompletableFuture<ContainerProtos.ContainerCommandResponseProto>
+      sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request)
+      throws IOException, ExecutionException, InterruptedException {
+    if ((channel == null) || (!channel.isActive())) {
+      throw new IOException("This channel is not connected.");
+    }
+    XceiverClientHandler handler =
+        channel.pipeline().get(XceiverClientHandler.class);
+    return handler.sendCommandAsync(request);
+  }
+
+  /**
+   * Create a pipeline.
+   *
+   * @param pipelineID - Name of the pipeline.
+   * @param datanodes - Datanodes
+   */
+  @Override
+  public void createPipeline(String pipelineID, List<DatanodeDetails> datanodes)
+      throws IOException {
+    // For a stand-alone pipeline, there is no notion of pipeline setup.
+    return;
+  }
+
+  /**
+   * Returns the pipeline type.
+   *
+   * @return - Stand Alone as the type.
+   */
+  @Override
+  public HddsProtos.ReplicationType getPipelineType() {
+    return HddsProtos.ReplicationType.STAND_ALONE;
+  }
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
new file mode 100644
index 0000000..e2b55ac
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.base.Preconditions;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandResponseProto;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+
+/**
+ * Netty client handler.
+ */
+public class XceiverClientHandler extends
+    SimpleChannelInboundHandler<ContainerCommandResponseProto> {
+
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClientHandler.class);
+  private final ConcurrentMap<String, ResponseFuture> responses =
+      new ConcurrentHashMap<>();
+
+  private final Pipeline pipeline;
+  private volatile Channel channel;
+  private XceiverClientMetrics metrics;
+  private final Semaphore semaphore;
+
+  /**
+   * Constructs a client that can communicate with a container server.
+   */
+  public XceiverClientHandler(Pipeline pipeline, Semaphore semaphore) {
+    super(false);
+    Preconditions.checkNotNull(pipeline);
+    this.pipeline = pipeline;
+    this.metrics = XceiverClientManager.getXceiverClientMetrics();
+    this.semaphore = semaphore;
+  }
+
+  /**
+   * <strong>Please keep in mind that this method will be renamed to {@code
+   * messageReceived(ChannelHandlerContext, I)} in 5.0.</strong>
+   * <p>
+   * Is called for each message of type {@link ContainerProtos
+   * .ContainerCommandResponseProto}.
+   *
+   * @param ctx the {@link ChannelHandlerContext} which this {@link
+   * SimpleChannelInboundHandler} belongs to
+   * @param msg the message to handle
+   * @throws Exception is thrown if an error occurred
+   */
+  @Override
+  public void channelRead0(ChannelHandlerContext ctx,
+      ContainerProtos.ContainerCommandResponseProto msg)
+      throws Exception {
+    Preconditions.checkNotNull(msg);
+    metrics.decrPendingContainerOpsMetrics(msg.getCmdType());
+
+    String key = msg.getTraceID();
+    ResponseFuture response = responses.remove(key);
+    semaphore.release();
+
+    if (response != null) {
+      response.getFuture().complete(msg);
+
+      long requestTime = response.getRequestTime();
+      metrics.addContainerOpsLatency(msg.getCmdType(),
+          Time.monotonicNowNanos() - requestTime);
+    } else {
+      LOG.error("A reply received for message that was not queued. trace " +
+          "ID: {}", msg.getTraceID());
+    }
+  }
+
+  @Override
+  public void channelRegistered(ChannelHandlerContext ctx) {
+    LOG.debug("channelRegistered: Connected to ctx");
+    channel = ctx.channel();
+  }
+
+  @Override
+  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
+    LOG.info("Exception in client " + cause.toString());
+    Iterator<String> keyIterator = responses.keySet().iterator();
+    while (keyIterator.hasNext()) {
+      ResponseFuture response = responses.remove(keyIterator.next());
+      response.getFuture().completeExceptionally(cause);
+      semaphore.release();
+    }
+    ctx.close();
+  }
+
+  /**
+   * Since netty is async, we send a work request and then wait until a response
+   * appears in the reply queue. This is a simple sync interface for clients. We
+   * should consider building async interfaces for clients if this turns out to
+   * be a performance bottleneck.
+   *
+   * @param request - request.
+   * @return -- response
+   */
+
+  public ContainerCommandResponseProto sendCommand(
+      ContainerProtos.ContainerCommandRequestProto request)
+      throws ExecutionException, InterruptedException {
+    Future<ContainerCommandResponseProto> future = sendCommandAsync(request);
+    return future.get();
+  }
+
+  /**
+   * sendCommandAsync queues a command to the Netty subsystem and returns a
+   * CompletableFuture. This future is marked completed in channelRead0
+   * when the response comes back.
+   * @param request - Request to execute
+   * @return CompletableFuture
+   */
+  public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
+      ContainerProtos.ContainerCommandRequestProto request)
+      throws InterruptedException {
+
+    // Throw an exception if the request doesn't have a trace ID.
+    if (StringUtils.isEmpty(request.getTraceID())) {
+      throw new IllegalArgumentException("Invalid trace ID");
+    }
+
+    // The datanode ID must be set in the command so that we can distinguish
+    // commands when the cluster simulator is running.
+    if(!request.hasDatanodeUuid()) {
+      throw new IllegalArgumentException("Invalid Datanode ID");
+    }
+
+    metrics.incrPendingContainerOpsMetrics(request.getCmdType());
+
+    CompletableFuture<ContainerCommandResponseProto> future
+        = new CompletableFuture<>();
+    ResponseFuture response = new ResponseFuture(future,
+        Time.monotonicNowNanos());
+    semaphore.acquire();
+    ResponseFuture previous = responses.putIfAbsent(
+        request.getTraceID(), response);
+    if (previous != null) {
+      LOG.error("Command with Trace already exists. Ignoring this command. " +
+              "{}. Previous Command: {}", request.getTraceID(),
+          previous.toString());
+      throw new IllegalStateException("Duplicate trace ID. Command with this " +
+          "trace ID is already executing. Please ensure that " +
+          "trace IDs are not reused. ID: " + request.getTraceID());
+    }
+
+    channel.writeAndFlush(request);
+    return response.getFuture();
+  }
+
+  /**
+   * Class wraps response future info.
+   */
+  static class ResponseFuture {
+    private final long requestTime;
+    private final CompletableFuture<ContainerCommandResponseProto> future;
+
+    ResponseFuture(CompletableFuture<ContainerCommandResponseProto> future,
+        long requestTime) {
+      this.future = future;
+      this.requestTime = requestTime;
+    }
+
+    public long getRequestTime() {
+      return requestTime;
+    }
+
+    public CompletableFuture<ContainerCommandResponseProto> getFuture() {
+      return future;
+    }
+  }
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
new file mode 100644
index 0000000..e10a9f6
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.codec.protobuf.ProtobufDecoder;
+import io.netty.handler.codec.protobuf.ProtobufEncoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+
+import java.util.concurrent.Semaphore;
+
+/**
+ * Setup the netty pipeline.
+ */
+public class XceiverClientInitializer extends
+    ChannelInitializer<SocketChannel> {
+  private final Pipeline pipeline;
+  private final Semaphore semaphore;
+
+  /**
+   * Constructs an Initializer for the client pipeline.
+   * @param pipeline  - Pipeline.
+   * @param semaphore - Semaphore used to bound the number of outstanding requests.
+   */
+  public XceiverClientInitializer(Pipeline pipeline, Semaphore semaphore) {
+    this.pipeline = pipeline;
+    this.semaphore = semaphore;
+  }
+
+  /**
+   * This method will be called once when the Channel is registered. After
+   * the method returns this instance will be removed from the
+   * ChannelPipeline of the Channel.
+   *
+   * @param ch   Channel which was registered.
+   * @throws Exception is thrown if an error occurs. In that case the
+   *                   Channel will be closed.
+   */
+  @Override
+  protected void initChannel(SocketChannel ch) throws Exception {
+    ChannelPipeline p = ch.pipeline();
+
+    p.addLast(new ProtobufVarint32FrameDecoder());
+    p.addLast(new ProtobufDecoder(ContainerProtos
+        .ContainerCommandResponseProto.getDefaultInstance()));
+
+    p.addLast(new ProtobufVarint32LengthFieldPrepender());
+    p.addLast(new ProtobufEncoder());
+
+    p.addLast(new XceiverClientHandler(this.pipeline, this.semaphore));
+
+  }
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
new file mode 100644
index 0000000..7585104
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .SCM_CONTAINER_CLIENT_MAX_SIZE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
+    .ReplicationType.RATIS;
+
+/**
+ * XceiverClientManager is responsible for the lifecycle of XceiverClient
+ * instances.  Callers use this class to acquire an XceiverClient instance
+ * connected to the desired container pipeline.  When done, the caller also uses
+ * this class to release the previously acquired XceiverClient instance.
+ *
+ * This class caches connections to containers for reuse, so that frequently
+ * accessing the same container goes through the same connection without
+ * re-establishing it. A cached connection is closed if it is not used for a
+ * period of time.
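+ *
+ * <p>A minimal usage sketch (conf, pipeline and request are placeholders for
+ * values obtained elsewhere):
+ * <pre>{@code
+ *   XceiverClientManager manager = new XceiverClientManager(conf);
+ *   XceiverClientSpi client = manager.acquireClient(pipeline);
+ *   try {
+ *     client.sendCommand(request);
+ *   } finally {
+ *     manager.releaseClient(client);
+ *   }
+ * }</pre>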
+ */
+public class XceiverClientManager implements Closeable {
+
+  //TODO : change this to SCM configuration class
+  private final Configuration conf;
+  private final Cache<String, XceiverClientSpi> clientCache;
+  private final boolean useRatis;
+
+  private static XceiverClientMetrics metrics;
+  /**
+   * Creates a new XceiverClientManager.
+   *
+   * @param conf configuration
+   */
+  public XceiverClientManager(Configuration conf) {
+    Preconditions.checkNotNull(conf);
+    int maxSize = conf.getInt(SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
+        SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT);
+    long staleThresholdMs = conf.getTimeDuration(
+        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY,
+        SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT, TimeUnit.MILLISECONDS);
+    this.useRatis = conf.getBoolean(
+        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
+        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
+    this.conf = conf;
+    this.clientCache = CacheBuilder.newBuilder()
+        .expireAfterAccess(staleThresholdMs, TimeUnit.MILLISECONDS)
+        .maximumSize(maxSize)
+        .removalListener(
+            new RemovalListener<String, XceiverClientSpi>() {
+            @Override
+            public void onRemoval(
+                RemovalNotification<String, XceiverClientSpi>
+                  removalNotification) {
+              synchronized (clientCache) {
+                // Mark the entry as evicted
+                XceiverClientSpi info = removalNotification.getValue();
+                info.setEvicted();
+              }
+            }
+          }).build();
+  }
+
+  @VisibleForTesting
+  public Cache<String, XceiverClientSpi> getClientCache() {
+    return clientCache;
+  }
+
+  /**
+   * Acquires an XceiverClientSpi connected to a container over the given
+   * pipeline.
+   *
+   * If there is already a cached XceiverClientSpi, simply return
+   * the cached one; otherwise create a new one.
+   *
+   * @param pipeline the container pipeline for the client connection
+   * @return XceiverClientSpi connected to a container
+   * @throws IOException if an XceiverClientSpi cannot be acquired
+   */
+  public XceiverClientSpi acquireClient(Pipeline pipeline)
+      throws IOException {
+    Preconditions.checkNotNull(pipeline);
+    Preconditions.checkArgument(pipeline.getMachines() != null);
+    Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
+
+    synchronized (clientCache) {
+      XceiverClientSpi info = getClient(pipeline);
+      info.incrementReference();
+      return info;
+    }
+  }
+
+  /**
+   * Releases a XceiverClientSpi after use.
+   *
+   * @param client client to release
+   */
+  public void releaseClient(XceiverClientSpi client) {
+    Preconditions.checkNotNull(client);
+    synchronized (clientCache) {
+      client.decrementReference();
+    }
+  }
+
+  private XceiverClientSpi getClient(Pipeline pipeline)
+      throws IOException {
+    String containerName = pipeline.getContainerName();
+    try {
+      return clientCache.get(containerName,
+          new Callable<XceiverClientSpi>() {
+          @Override
+          public XceiverClientSpi call() throws Exception {
+            XceiverClientSpi client = pipeline.getType() == RATIS ?
+                    XceiverClientRatis.newXceiverClientRatis(pipeline, conf)
+                    : new XceiverClient(pipeline, conf);
+            client.connect();
+            return client;
+          }
+        });
+    } catch (Exception e) {
+      throw new IOException(
+          "Exception getting XceiverClient: " + e.toString(), e);
+    }
+  }
+
+  /**
+   * Close and remove all the cached clients.
+   */
+  public void close() {
+    //closing is done through RemovalListener
+    clientCache.invalidateAll();
+    clientCache.cleanUp();
+
+    if (metrics != null) {
+      metrics.unRegister();
+    }
+  }
+
+  /**
+   * Tells us if Ratis is enabled for this cluster.
+   * @return True if Ratis is enabled.
+   */
+  public boolean isUseRatis() {
+    return useRatis;
+  }
+
+  /**
+   * Returns the replication factor to use: THREE when Ratis is enabled,
+   * ONE otherwise.
+   * @return the replication factor
+   */
+  public HddsProtos.ReplicationFactor getFactor() {
+    if(isUseRatis()) {
+      return HddsProtos.ReplicationFactor.THREE;
+    }
+    return HddsProtos.ReplicationFactor.ONE;
+  }
+
+  /**
+   * Returns the default replication type.
+   * @return Ratis or Standalone
+   */
+  public HddsProtos.ReplicationType getType() {
+    // TODO : Fix me and make Ratis default before release.
+    // TODO: Remove this as replication factor and type are pipeline properties
+    if(isUseRatis()) {
+      return HddsProtos.ReplicationType.RATIS;
+    }
+    return HddsProtos.ReplicationType.STAND_ALONE;
+  }
+
+  /**
+   * Get the xceiver client metrics.
+   */
+  public synchronized static XceiverClientMetrics getXceiverClientMetrics() {
+    if (metrics == null) {
+      metrics = XceiverClientMetrics.create();
+    }
+
+    return metrics;
+  }
+}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
new file mode 100644
index 0000000..a61eba1
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+/**
+ * The client metrics for the Storage Container protocol.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Storage Container Client Metrics", context = "dfs")
+public class XceiverClientMetrics {
+  public static final String SOURCE_NAME = XceiverClientMetrics.class
+      .getSimpleName();
+
+  private @Metric MutableCounterLong pendingOps;
+  private MutableCounterLong[] pendingOpsArray;
+  private MutableRate[] containerOpsLatency;
+  private MetricsRegistry registry;
+
+  public XceiverClientMetrics() {
+    int numEnumEntries = ContainerProtos.Type.values().length;
+    this.registry = new MetricsRegistry(SOURCE_NAME);
+
+    this.pendingOpsArray = new MutableCounterLong[numEnumEntries];
+    this.containerOpsLatency = new MutableRate[numEnumEntries];
+    for (int i = 0; i < numEnumEntries; i++) {
+      pendingOpsArray[i] = registry.newCounter(
+          "numPending" + ContainerProtos.Type.valueOf(i + 1),
+          "number of pending" + ContainerProtos.Type.valueOf(i + 1) + " ops",
+          (long) 0);
+
+      containerOpsLatency[i] = registry.newRate(
+          ContainerProtos.Type.valueOf(i + 1) + "Latency",
+          "latency of " + ContainerProtos.Type.valueOf(i + 1)
+          + " ops");
+    }
+  }
+
+  public static XceiverClientMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    return ms.register(SOURCE_NAME, "Storage Container Client Metrics",
+        new XceiverClientMetrics());
+  }
+
+  public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) {
+    pendingOps.incr();
+    pendingOpsArray[type.ordinal()].incr();
+  }
+
+  public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) {
+    pendingOps.incr(-1);
+    pendingOpsArray[type.ordinal()].incr(-1);
+  }
+
+  public void addContainerOpsLatency(ContainerProtos.Type type,
+      long latencyNanos) {
+    containerOpsLatency[type.ordinal()].add(latencyNanos);
+  }
+
+  public long getContainerOpsMetrics(ContainerProtos.Type type) {
+    return pendingOpsArray[type.ordinal()].value();
+  }
+
+  public void unRegister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(SOURCE_NAME);
+  }
+}
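As a usage sketch of the metrics class above: the try/finally timing pattern is illustrative, and only the XceiverClientMetrics and getXceiverClientMetrics calls come from the code in this patch.

    // Sketch: record pending-op count and latency around a WriteChunk call.
    XceiverClientMetrics metrics =
        XceiverClientManager.getXceiverClientMetrics();
    ContainerProtos.Type op = ContainerProtos.Type.WriteChunk;
    metrics.incrPendingContainerOpsMetrics(op);
    long startNanos = System.nanoTime();
    try {
      // ... send the WriteChunk request here ...
    } finally {
      metrics.decrPendingContainerOpsMetrics(op);
      metrics.addContainerOpsLatency(op, System.nanoTime() - startNanos);
    }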
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
new file mode 100644
index 0000000..d010c69
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.ratis.RatisHelper;
+import org.apache.ratis.client.RaftClient;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.rpc.RpcType;
+import org.apache.ratis.rpc.SupportedRpcType;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * An implementation of {@link XceiverClientSpi} using Ratis.
+ * The underlying RPC mechanism can be chosen via the constructor.
+ */
+public final class XceiverClientRatis extends XceiverClientSpi {
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClientRatis.class);
+
+  public static XceiverClientRatis newXceiverClientRatis(
+      Pipeline pipeline, Configuration ozoneConf) {
+    final String rpcType = ozoneConf.get(
+        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+    final int maxOutstandingRequests =
+        HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
+    return new XceiverClientRatis(pipeline,
+        SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests);
+  }
+
+  private final Pipeline pipeline;
+  private final RpcType rpcType;
+  private final AtomicReference<RaftClient> client = new AtomicReference<>();
+  private final int maxOutstandingRequests;
+
+  /**
+   * Constructs a client.
+   */
+  private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
+      int maxOutStandingChunks) {
+    super();
+    this.pipeline = pipeline;
+    this.rpcType = rpcType;
+    this.maxOutstandingRequests = maxOutStandingChunks;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public void createPipeline(String clusterId, List<DatanodeDetails> datanodes)
+      throws IOException {
+    RaftGroup group = RatisHelper.newRaftGroup(datanodes);
+    LOG.debug("initializing pipeline:{} with nodes:{}", clusterId,
+        group.getPeers());
+    reinitialize(datanodes, group);
+  }
+
+  /**
+   * Returns Ratis as pipeline Type.
+   *
+   * @return - Ratis
+   */
+  @Override
+  public HddsProtos.ReplicationType getPipelineType() {
+    return HddsProtos.ReplicationType.RATIS;
+  }
+
+  private void reinitialize(List<DatanodeDetails> datanodes, RaftGroup group)
+      throws IOException {
+    if (datanodes.isEmpty()) {
+      return;
+    }
+
+    IOException exception = null;
+    for (DatanodeDetails d : datanodes) {
+      try {
+        reinitialize(d, group);
+      } catch (IOException ioe) {
+        if (exception == null) {
+          exception = new IOException(
+              "Failed to reinitialize some of the RaftPeer(s)", ioe);
+        } else {
+          exception.addSuppressed(ioe);
+        }
+      }
+    }
+    if (exception != null) {
+      throw exception;
+    }
+  }
+
+  /**
+   * Adds a new peer to the Ratis ring.
+   *
+   * @param datanode - new datanode
+   * @param group    - Raft group
+   * @throws IOException - on Failure.
+   */
+  private void reinitialize(DatanodeDetails datanode, RaftGroup group)
+      throws IOException {
+    final RaftPeer p = RatisHelper.toRaftPeer(datanode);
+    try (RaftClient client = RatisHelper.newRaftClient(rpcType, p)) {
+      client.reinitialize(group, p.getId());
+    } catch (IOException ioe) {
+      LOG.error("Failed to reinitialize RaftPeer:{} datanode: {}  ",
+          p, datanode, ioe);
+      throw new IOException("Failed to reinitialize RaftPeer " + p
+          + "(datanode=" + datanode + ")", ioe);
+    }
+  }
+
+  @Override
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  @Override
+  public void connect() throws Exception {
+    LOG.debug("Connecting to pipeline:{} leader:{}",
+        getPipeline().getPipelineName(),
+        RatisHelper.toRaftPeerId(pipeline.getLeader()));
+    // TODO : XceiverClient ratis should pass the config value of
+    // maxOutstandingRequests so as to set the upper bound on max no of async
+    // requests to be handled by raft client
+    if (!client.compareAndSet(null,
+        RatisHelper.newRaftClient(rpcType, getPipeline()))) {
+      throw new IllegalStateException("Client is already connected.");
+    }
+  }
+
+  @Override
+  public void close() {
+    final RaftClient c = client.getAndSet(null);
+    if (c != null) {
+      try {
+        c.close();
+      } catch (IOException e) {
+        throw new IllegalStateException(e);
+      }
+    }
+  }
+
+  private RaftClient getClient() {
+    return Objects.requireNonNull(client.get(), "client is null");
+  }
+
+  private boolean isReadOnly(ContainerCommandRequestProto proto) {
+    switch (proto.getCmdType()) {
+    case ReadContainer:
+    case ReadChunk:
+    case ListKey:
+    case GetKey:
+    case GetSmallFile:
+    case ListContainer:
+    case ListChunk:
+      return true;
+    case CloseContainer:
+    case WriteChunk:
+    case UpdateContainer:
+    case CompactChunk:
+    case CreateContainer:
+    case DeleteChunk:
+    case DeleteContainer:
+    case DeleteKey:
+    case PutKey:
+    case PutSmallFile:
+    default:
+      return false;
+    }
+  }
+
+  private RaftClientReply sendRequest(ContainerCommandRequestProto request)
+      throws IOException {
+    boolean isReadOnlyRequest = isReadOnly(request);
+    ByteString byteString =
+        ShadedProtoUtil.asShadedByteString(request.toByteArray());
+    LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
+    final RaftClientReply reply =  isReadOnlyRequest ?
+        getClient().sendReadOnly(() -> byteString) :
+        getClient().send(() -> byteString);
+    LOG.debug("reply {} {}", isReadOnlyRequest, reply);
+    return reply;
+  }
+
+  private CompletableFuture<RaftClientReply> sendRequestAsync(
+      ContainerCommandRequestProto request) throws IOException {
+    boolean isReadOnlyRequest = isReadOnly(request);
+    ByteString byteString =
+        ShadedProtoUtil.asShadedByteString(request.toByteArray());
+    LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
+    return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :
+        getClient().sendAsync(() -> byteString);
+  }
+
+  @Override
+  public ContainerCommandResponseProto sendCommand(
+      ContainerCommandRequestProto request) throws IOException {
+    final RaftClientReply reply = sendRequest(request);
+    Preconditions.checkState(reply.isSuccess());
+    return ContainerCommandResponseProto.parseFrom(
+        ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
+  }
+
+  /**
+   * Sends a given command to the server and gets a waitable future back.
+   *
+   * @param request Request
+   * @return Response to the command
+   * @throws IOException
+   */
+  @Override
+  public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
+      ContainerCommandRequestProto request)
+      throws IOException, ExecutionException, InterruptedException {
+    return sendRequestAsync(request).whenComplete((reply, e) ->
+          LOG.debug("received reply {} for request: {} exception: {}", request,
+              reply, e))
+        .thenApply(reply -> {
+          try {
+            return ContainerCommandResponseProto.parseFrom(
+                ShadedProtoUtil.asByteString(reply.getMessage().getContent()));
+          } catch (InvalidProtocolBufferException e) {
+            throw new CompletionException(e);
+          }
+        });
+  }
+}
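A minimal sketch of how the Ratis client above might be driven end to end; the pipeline, conf, and request objects are assumed to exist and exception handling is elided.

    // Sketch: assumes a RATIS-typed pipeline and a prepared request proto.
    XceiverClientRatis ratisClient =
        XceiverClientRatis.newXceiverClientRatis(pipeline, conf);
    ratisClient.connect();
    try {
      ContainerCommandResponseProto response =
          ratisClient.sendCommand(request);
      // Read-only command types (ReadChunk, GetKey, ...) are routed through
      // sendReadOnly internally; everything else goes through the Raft log.
    } finally {
      ratisClient.close();
    }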
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
new file mode 100644
index 0000000..8f30a7f
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -0,0 +1,407 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.client;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.protocolPB
+    .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
+    .ALLOCATED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
+    .OPEN;
+
+/**
+ * This class provides the client-facing APIs of container operations.
+ */
+public class ContainerOperationClient implements ScmClient {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContainerOperationClient.class);
+  private static long containerSizeB = -1;
+  private final StorageContainerLocationProtocolClientSideTranslatorPB
+      storageContainerLocationClient;
+  private final XceiverClientManager xceiverClientManager;
+
+  public ContainerOperationClient(
+      StorageContainerLocationProtocolClientSideTranslatorPB
+          storageContainerLocationClient,
+      XceiverClientManager xceiverClientManager) {
+    this.storageContainerLocationClient = storageContainerLocationClient;
+    this.xceiverClientManager = xceiverClientManager;
+  }
+
+  /**
+   * Returns the capacity of containers. The current assumption is that all
+   * containers have the same capacity, so a single static value is sufficient
+   * for any container.
+   * @return The capacity of one container in number of bytes.
+   */
+  public static long getContainerSizeB() {
+    return containerSizeB;
+  }
+
+  /**
+   * Sets the capacity of a container. Should be called exactly once on
+   * system start.
+   * @param size Capacity of one container in number of bytes.
+   */
+  public static void setContainerSizeB(long size) {
+    containerSizeB = size;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Pipeline createContainer(String containerId, String owner)
+      throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      Pipeline pipeline =
+          storageContainerLocationClient.allocateContainer(
+              xceiverClientManager.getType(),
+              xceiverClientManager.getFactor(), containerId, owner);
+      client = xceiverClientManager.acquireClient(pipeline);
+
+      // Allocated State means that SCM has allocated this pipeline in its
+      // namespace. The client needs to create the pipeline on the machines
+      // which were chosen by the SCM.
+      Preconditions.checkState(pipeline.getLifeCycleState() == ALLOCATED ||
+          pipeline.getLifeCycleState() == OPEN, "Unexpected pipeline state");
+      if (pipeline.getLifeCycleState() == ALLOCATED) {
+        createPipeline(client, pipeline);
+      }
+      // TODO : Container Client State needs to be updated.
+      // TODO : Return ContainerInfo instead of Pipeline
+      createContainer(containerId, client, pipeline);
+      return pipeline;
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
+  /**
+   * Create a container over pipeline specified by the SCM.
+   *
+   * @param containerId - Container ID
+   * @param client - Client to communicate with Datanodes
+   * @param pipeline - A pipeline that is already created.
+   * @throws IOException
+   */
+  public void createContainer(String containerId, XceiverClientSpi client,
+      Pipeline pipeline) throws IOException {
+    String traceID = UUID.randomUUID().toString();
+    storageContainerLocationClient.notifyObjectStageChange(
+        ObjectStageChangeRequestProto.Type.container,
+        containerId,
+        ObjectStageChangeRequestProto.Op.create,
+        ObjectStageChangeRequestProto.Stage.begin);
+    ContainerProtocolCalls.createContainer(client, traceID);
+    storageContainerLocationClient.notifyObjectStageChange(
+        ObjectStageChangeRequestProto.Type.container,
+        containerId,
+        ObjectStageChangeRequestProto.Op.create,
+        ObjectStageChangeRequestProto.Stage.complete);
+
+    // Let us log this info after we let SCM know that we have completed the
+    // creation state.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Created container " + containerId
+          + " leader:" + pipeline.getLeader()
+          + " machines:" + pipeline.getMachines());
+    }
+  }
+
+  /**
+   * Creates a pipeline over the machines chosen by the SCM.
+   *
+   * @param client - Client
+   * @param pipeline - pipeline to be created on Datanodes.
+   * @throws IOException
+   */
+  private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
+      throws IOException {
+
+    Preconditions.checkNotNull(pipeline.getPipelineName(), "Pipeline " +
+        "name cannot be null when client create flag is set.");
+
+    // Pipeline creation is a three step process.
+    //
+    // 1. Notify SCM that this client is doing a create pipeline on
+    // datanodes.
+    //
+    // 2. Talk to Datanodes to create the pipeline.
+    //
+    // 3. update SCM that pipeline creation was successful.
+    storageContainerLocationClient.notifyObjectStageChange(
+        ObjectStageChangeRequestProto.Type.pipeline,
+        pipeline.getPipelineName(),
+        ObjectStageChangeRequestProto.Op.create,
+        ObjectStageChangeRequestProto.Stage.begin);
+
+    client.createPipeline(pipeline.getPipelineName(),
+        pipeline.getMachines());
+
+    storageContainerLocationClient.notifyObjectStageChange(
+        ObjectStageChangeRequestProto.Type.pipeline,
+        pipeline.getPipelineName(),
+        ObjectStageChangeRequestProto.Op.create,
+        ObjectStageChangeRequestProto.Stage.complete);
+
+    // TODO : Should we change the state on the client side ??
+    // That makes sense, but it is not needed for the client to work.
+    LOG.debug("Pipeline creation successful. Pipeline: {}",
+        pipeline.toString());
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Pipeline createContainer(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor,
+      String containerId, String owner) throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      // allocate container on SCM.
+      Pipeline pipeline =
+          storageContainerLocationClient.allocateContainer(type, factor,
+              containerId, owner);
+      client = xceiverClientManager.acquireClient(pipeline);
+
+      // Allocated State means that SCM has allocated this pipeline in its
+      // namespace. The client needs to create the pipeline on the machines
+      // which were chosen by the SCM.
+      if (pipeline.getLifeCycleState() == ALLOCATED) {
+        createPipeline(client, pipeline);
+      }
+
+      // TODO : Return ContainerInfo instead of Pipeline
+      // Allocate the container on the pipeline leader datanode, reusing the
+      // client acquired above; acquiring it a second time here would leak a
+      // cache reference, since only one release happens in the finally block.
+      createContainer(containerId, client, pipeline);
+      return pipeline;
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
+  /**
+   * Returns a set of Nodes that meet a query criteria.
+   *
+   * @param nodeStatuses - A set of criteria that we want the node to have.
+   * @param queryScope - Query scope - Cluster or pool.
+   * @param poolName - if it is pool, a pool name is required.
+   * @return A set of nodes that meet the requested criteria.
+   * @throws IOException
+   */
+  @Override
+  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
+      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
+      throws IOException {
+    return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
+        poolName);
+  }
+
+  /**
+   * Creates a specified replication pipeline.
+   */
+  @Override
+  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
+      throws IOException {
+    return storageContainerLocationClient.createReplicationPipeline(type,
+        factor, nodePool);
+  }
+
+  /**
+   * Deletes the container; this releases any resources it uses.
+   * @param pipeline - Pipeline that represents the container.
+   * @param force - True to forcibly delete the container.
+   * @throws IOException
+   */
+  @Override
+  public void deleteContainer(Pipeline pipeline, boolean force)
+      throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      client = xceiverClientManager.acquireClient(pipeline);
+      String traceID = UUID.randomUUID().toString();
+      ContainerProtocolCalls.deleteContainer(client, force, traceID);
+      storageContainerLocationClient
+          .deleteContainer(pipeline.getContainerName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Deleted container {}, leader: {}, machines: {} ",
+            pipeline.getContainerName(),
+            pipeline.getLeader(),
+            pipeline.getMachines());
+      }
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<ContainerInfo> listContainer(String startName,
+      String prefixName, int count)
+      throws IOException {
+    return storageContainerLocationClient.listContainer(
+        startName, prefixName, count);
+  }
+
+  /**
+   * Gets metadata from an existing container.
+   *
+   * @param pipeline - pipeline that represents the container.
+   * @return ContainerData - a protobuf message with the basic info
+   * of a container.
+   * @throws IOException
+   */
+  @Override
+  public ContainerData readContainer(Pipeline pipeline) throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      client = xceiverClientManager.acquireClient(pipeline);
+      String traceID = UUID.randomUUID().toString();
+      ReadContainerResponseProto response =
+          ContainerProtocolCalls.readContainer(client,
+              pipeline.getContainerName(), traceID);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Read container {}, leader: {}, machines: {} ",
+            pipeline.getContainerName(),
+            pipeline.getLeader(),
+            pipeline.getMachines());
+      }
+      return response.getContainerData();
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
+  /**
+   * Given an id, return the pipeline associated with the container.
+   * @param containerId - String Container ID
+   * @return Pipeline of the existing container, corresponding to the given id.
+   * @throws IOException
+   */
+  @Override
+  public Pipeline getContainer(String containerId) throws
+      IOException {
+    return storageContainerLocationClient.getContainer(containerId);
+  }
+
+  /**
+   * Close a container.
+   *
+   * @param pipeline the container to be closed.
+   * @throws IOException
+   */
+  @Override
+  public void closeContainer(Pipeline pipeline) throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      LOG.debug("Close container {}", pipeline);
+      /*
+      TODO: two orderings are possible here, revisit this later:
+      1. close on SCM first, then on the datanode
+      2. close on the datanode first, then on SCM
+
+      with 1: if the client fails after closing on SCM, there is a container
+      that SCM thinks is closed but is actually open. SCM will no longer
+      allocate blocks to it, which is fine, but SCM may later try to
+      replicate this "closed" container, which may not be safe.
+
+      with 2: if the client fails after closing on the datanode, there is a
+      container that SCM thinks is open but is actually closed. SCM will
+      still try to allocate blocks to it, which will fail when the write is
+      actually attempted. No more data can be written, but at least the
+      correctness and consistency of the existing data are maintained.
+
+      For now, take approach #2.
+       */
+      // Actually close the container on Datanode
+      client = xceiverClientManager.acquireClient(pipeline);
+      String traceID = UUID.randomUUID().toString();
+
+      String containerId = pipeline.getContainerName();
+
+      storageContainerLocationClient.notifyObjectStageChange(
+          ObjectStageChangeRequestProto.Type.container,
+          containerId,
+          ObjectStageChangeRequestProto.Op.close,
+          ObjectStageChangeRequestProto.Stage.begin);
+
+      ContainerProtocolCalls.closeContainer(client, traceID);
+      // Notify SCM to close the container
+      storageContainerLocationClient.notifyObjectStageChange(
+          ObjectStageChangeRequestProto.Type.container,
+          containerId,
+          ObjectStageChangeRequestProto.Op.close,
+          ObjectStageChangeRequestProto.Stage.complete);
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
+  /**
+   * Gets the current usage information.
+   * @param pipeline - Pipeline
+   * @return the size of the given container.
+   * @throws IOException
+   */
+  @Override
+  public long getContainerSize(Pipeline pipeline) throws IOException {
+    // TODO : Pipeline can be null, handle it correctly.
+    long size = getContainerSizeB();
+    if (size == -1) {
+      throw new IOException("Container size unknown!");
+    }
+    return size;
+  }
+}
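An illustrative flow over the ScmClient API above; the translator and xceiver-manager instances and the container name are placeholders assumed to be wired up by the caller.

    // Sketch: containerId/owner are example values, not real identifiers.
    ScmClient scmClient = new ContainerOperationClient(
        storageContainerLocationClient, xceiverClientManager);
    Pipeline pipeline = scmClient.createContainer("example-container", "ozone");
    ContainerData data = scmClient.readContainer(pipeline);
    scmClient.closeContainer(pipeline);
    scmClient.deleteContainer(pipeline, false);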
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
new file mode 100644
index 0000000..bc5f8d6
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.client;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.ParseException;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Utility methods for Ozone and Container Clients.
+ *
+ * The methods to retrieve SCM service endpoints assume there is a single
+ * SCM service instance. This will change when we switch to replicated service
+ * instances for redundancy.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public final class HddsClientUtils {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      HddsClientUtils.class);
+
+  private static final int NO_PORT = -1;
+
+  private HddsClientUtils() {
+  }
+
+  /**
+   * Date format used in Ozone. The formatter is held in a ThreadLocal, so it
+   * is thread safe to use.
+   */
+  private static final ThreadLocal<DateTimeFormatter> DATE_FORMAT =
+      ThreadLocal.withInitial(() -> {
+        DateTimeFormatter format =
+            DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT);
+        return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE));
+      });
+
+
+  /**
+   * Convert time in millisecond to a human readable format required in ozone.
+   * @return a human readable string for the input time
+   */
+  public static String formatDateTime(long millis) {
+    ZonedDateTime dateTime = ZonedDateTime.ofInstant(
+        Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone());
+    return DATE_FORMAT.get().format(dateTime);
+  }
+
+  /**
+   * Convert time in ozone date format to millisecond.
+   * @return time in milliseconds
+   */
+  public static long formatDateTime(String date) throws ParseException {
+    Preconditions.checkNotNull(date, "Date string should not be null.");
+    return ZonedDateTime.parse(date, DATE_FORMAT.get())
+        .toInstant().toEpochMilli();
+  }
+
+
+
+  /**
+   * Verifies that a bucket or volume name is a valid DNS name.
+   *
+   * @param resName Bucket or volume name to be validated
+   *
+   * @throws IllegalArgumentException
+   */
+  public static void verifyResourceName(String resName)
+      throws IllegalArgumentException {
+
+    if (resName == null) {
+      throw new IllegalArgumentException("Bucket or Volume name is null");
+    }
+
+    if ((resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH) ||
+        (resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH)) {
+      throw new IllegalArgumentException(
+          "Bucket or Volume length is illegal, " +
+              "valid length is 3-63 characters");
+    }
+
+    if ((resName.charAt(0) == '.') || (resName.charAt(0) == '-')) {
+      throw new IllegalArgumentException(
+          "Bucket or Volume name cannot start with a period or dash");
+    }
+
+    if ((resName.charAt(resName.length() - 1) == '.') ||
+        (resName.charAt(resName.length() - 1) == '-')) {
+      throw new IllegalArgumentException(
+          "Bucket or Volume name cannot end with a period or dash");
+    }
+
+    boolean isIPv4 = true;
+    char prev = (char) 0;
+
+    for (int index = 0; index < resName.length(); index++) {
+      char currChar = resName.charAt(index);
+
+      if (currChar != '.') {
+        isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4;
+      }
+
+      if (currChar >= 'A' && currChar <= 'Z') {
+        throw new IllegalArgumentException(
+            "Bucket or Volume name does not support uppercase characters");
+      }
+
+      if ((currChar != '.') && (currChar != '-')) {
+        if ((currChar < '0') || (currChar > '9' && currChar < 'a') ||
+            (currChar > 'z')) {
+          throw new IllegalArgumentException("Bucket or Volume name has an " +
+              "unsupported character : " +
+              currChar);
+        }
+      }
+
+      if ((prev == '.') && (currChar == '.')) {
+        throw new IllegalArgumentException("Bucket or Volume name should not " +
+            "have two contiguous periods");
+      }
+
+      if ((prev == '-') && (currChar == '.')) {
+        throw new IllegalArgumentException(
+            "Bucket or Volume name should not have period after dash");
+      }
+
+      if ((prev == '.') && (currChar == '-')) {
+        throw new IllegalArgumentException(
+            "Bucket or Volume name should not have dash after period");
+      }
+      prev = currChar;
+    }
+
+    if (isIPv4) {
+      throw new IllegalArgumentException(
+          "Bucket or Volume name cannot be an IPv4 address or all numeric");
+    }
+  }
+
+  /**
+   * Returns the cache value to be used for list calls.
+   * @param conf Configuration object
+   * @return list cache size
+   */
+  public static int getListCacheSize(Configuration conf) {
+    return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
+        OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
+  }
+
+  /**
+   * @return a default instance of {@link CloseableHttpClient}.
+   */
+  public static CloseableHttpClient newHttpClient() {
+    return HddsClientUtils.newHttpClient(new Configuration());
+  }
+
+  /**
+   * Returns a {@link CloseableHttpClient} configured by given configuration.
+   * If conf is null, returns a default instance.
+   *
+   * @param conf configuration
+   * @return a {@link CloseableHttpClient} instance.
+   */
+  public static CloseableHttpClient newHttpClient(Configuration conf) {
+    long socketTimeout = OzoneConfigKeys
+        .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT;
+    long connectionTimeout = OzoneConfigKeys
+        .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT;
+    if (conf != null) {
+      socketTimeout = conf.getTimeDuration(
+          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
+          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      connectionTimeout = conf.getTimeDuration(
+          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
+          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
+          TimeUnit.MILLISECONDS);
+    }
+
+    CloseableHttpClient client = HttpClients.custom()
+        .setDefaultRequestConfig(
+            RequestConfig.custom()
+                .setSocketTimeout(Math.toIntExact(socketTimeout))
+                .setConnectTimeout(Math.toIntExact(connectionTimeout))
+                .build())
+        .build();
+    return client;
+  }
+
+  /**
+   * Returns the maximum number of outstanding async requests to be handled
+   * by the Standalone and Ratis clients.
+   */
+  public static int getMaxOutstandingRequests(Configuration config) {
+    return config
+        .getInt(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS,
+            ScmConfigKeys
+                .SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT);
+  }
+}
\ No newline at end of file
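A small sketch of the helpers above; the resource names and the Configuration instance are examples only.

    // Sketch: resource-name validation and a configured HTTP client.
    HddsClientUtils.verifyResourceName("my-bucket-01");        // accepted
    try {
      HddsClientUtils.verifyResourceName("bad..name");         // rejected
    } catch (IllegalArgumentException e) {
      // two contiguous periods are not allowed
    }
    try (CloseableHttpClient http = HddsClientUtils.newHttpClient(conf)) {
      // socket/connect timeouts come from OZONE_CLIENT_SOCKET_TIMEOUT and
      // OZONE_CLIENT_CONNECTION_TIMEOUT in OzoneConfigKeys.
    }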
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
new file mode 100644
index 0000000..73ad78c
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.client;
+
+/**
+ * Client facing classes for the container operations.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
new file mode 100644
index 0000000..9390bc1
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+/**
+ * Classes for different type of container service client.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
new file mode 100644
index 0000000..9b8eaa9
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadChunkResponseProto;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * An {@link InputStream} used by the REST service in combination with the
+ * SCMClient to read the value of a key from a sequence
+ * of container chunks.  All bytes of the key value are stored in container
+ * chunks.  Each chunk may contain multiple underlying {@link ByteBuffer}
+ * instances.  This class encapsulates all state management for iterating
+ * through the sequence of chunks and the sequence of buffers within each chunk.
+ */
+public class ChunkInputStream extends InputStream implements Seekable {
+
+  private static final int EOF = -1;
+
+  private final String key;
+  private final String traceID;
+  private XceiverClientManager xceiverClientManager;
+  private XceiverClientSpi xceiverClient;
+  private List<ChunkInfo> chunks;
+  private int chunkIndex;
+  private long[] chunkOffset;
+  private List<ByteBuffer> buffers;
+  private int bufferIndex;
+
+  /**
+   * Creates a new ChunkInputStream.
+   *
+   * @param key chunk key
+   * @param xceiverClientManager client manager that controls client
+   * @param xceiverClient client to perform container calls
+   * @param chunks list of chunks to read
+   * @param traceID container protocol call traceID
+   */
+  public ChunkInputStream(String key, XceiverClientManager xceiverClientManager,
+      XceiverClientSpi xceiverClient, List<ChunkInfo> chunks, String traceID) {
+    this.key = key;
+    this.traceID = traceID;
+    this.xceiverClientManager = xceiverClientManager;
+    this.xceiverClient = xceiverClient;
+    this.chunks = chunks;
+    this.chunkIndex = -1;
+    // chunkOffset[i] stores offset at which chunk i stores data in
+    // ChunkInputStream
+    this.chunkOffset = new long[this.chunks.size()];
+    initializeChunkOffset();
+    this.buffers = null;
+    this.bufferIndex = 0;
+  }
+
+  private void initializeChunkOffset() {
+    int tempOffset = 0;
+    for (int i = 0; i < chunks.size(); i++) {
+      chunkOffset[i] = tempOffset;
+      tempOffset += chunks.get(i).getLen();
+    }
+  }
+
+  @Override
+  public synchronized int read()
+      throws IOException {
+    checkOpen();
+    int available = prepareRead(1);
+    return available == EOF ? EOF :
+        Byte.toUnsignedInt(buffers.get(bufferIndex).get());
+  }
+
+  @Override
+  public synchronized int read(byte[] b, int off, int len) throws IOException {
+    // According to the JavaDocs for InputStream, it is recommended that
+    // subclasses provide an override of bulk read if possible for performance
+    // reasons.  In addition to performance, we need to do it for correctness
+    // reasons.  The Ozone REST service uses PipedInputStream and
+    // PipedOutputStream to relay HTTP response data between a Jersey thread and
+    // a Netty thread.  It turns out that PipedInputStream/PipedOutputStream
+    // have a subtle dependency (bug?) on the wrapped stream providing separate
+    // implementations of single-byte read and bulk read.  Without this, get key
+    // responses might close the connection before writing all of the bytes
+    // advertised in the Content-Length.
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || len > b.length - off) {
+      throw new IndexOutOfBoundsException();
+    }
+    if (len == 0) {
+      return 0;
+    }
+    checkOpen();
+    int available = prepareRead(len);
+    if (available == EOF) {
+      return EOF;
+    }
+    buffers.get(bufferIndex).get(b, off, available);
+    return available;
+  }
+
+  @Override
+  public synchronized void close() {
+    if (xceiverClientManager != null && xceiverClient != null) {
+      xceiverClientManager.releaseClient(xceiverClient);
+      xceiverClientManager = null;
+      xceiverClient = null;
+    }
+  }
+
+  /**
+   * Checks if the stream is open.  If not, throws an exception.
+   *
+   * @throws IOException if stream is closed
+   */
+  private synchronized void checkOpen() throws IOException {
+    if (xceiverClient == null) {
+      throw new IOException("ChunkInputStream has been closed.");
+    }
+  }
+
+  /**
+   * Prepares to read by advancing through chunks and buffers as needed until it
+   * finds data to return or encounters EOF.
+   *
+   * @param len desired length of data to read
+   * @return length of data available to read, possibly less than desired length
+   */
+  private synchronized int prepareRead(int len) throws IOException {
+    for (;;) {
+      if (chunks == null || chunks.isEmpty()) {
+        // This must be an empty key.
+        return EOF;
+      } else if (buffers == null) {
+        // The first read triggers fetching the first chunk.
+        readChunkFromContainer();
+      } else if (!buffers.isEmpty() &&
+          buffers.get(bufferIndex).hasRemaining()) {
+        // Data is available from the current buffer.
+        ByteBuffer bb = buffers.get(bufferIndex);
+        return len > bb.remaining() ? bb.remaining() : len;
+      } else if (!buffers.isEmpty() &&
+          !buffers.get(bufferIndex).hasRemaining() &&
+          bufferIndex < buffers.size() - 1) {
+        // There are additional buffers available.
+        ++bufferIndex;
+      } else if (chunkIndex < chunks.size() - 1) {
+        // There are additional chunks available.
+        readChunkFromContainer();
+      } else {
+        // All available input has been consumed.
+        return EOF;
+      }
+    }
+  }
+
+  /**
+   * Attempts to read the chunk at the specified offset in the chunk list.  If
+   * successful, then the data of the read chunk is saved so that its bytes can
+   * be returned from subsequent read calls.
+   *
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  private synchronized void readChunkFromContainer() throws IOException {
+    // On every chunk read chunkIndex should be increased so as to read the
+    // next chunk
+    chunkIndex += 1;
+    final ReadChunkResponseProto readChunkResponse;
+    try {
+      readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
+          chunks.get(chunkIndex), key, traceID);
+    } catch (IOException e) {
+      throw new IOException("Unexpected OzoneException: " + e.toString(), e);
+    }
+    ByteString byteString = readChunkResponse.getData();
+    buffers = byteString.asReadOnlyByteBufferList();
+    bufferIndex = 0;
+  }
+
+  @Override
+  public synchronized void seek(long pos) throws IOException {
+    if (pos < 0 || (chunks.size() == 0 && pos > 0)
+        || pos >= chunkOffset[chunks.size() - 1] + chunks.get(chunks.size() - 1)
+        .getLen()) {
+      throw new EOFException(
+          "EOF encountered pos: " + pos + " container key: " + key);
+    }
+    if (chunkIndex == -1) {
+      chunkIndex = Arrays.binarySearch(chunkOffset, pos);
+    } else if (pos < chunkOffset[chunkIndex]) {
+      chunkIndex = Arrays.binarySearch(chunkOffset, 0, chunkIndex, pos);
+    } else if (pos >= chunkOffset[chunkIndex] + chunks.get(chunkIndex)
+        .getLen()) {
+      chunkIndex =
+          Arrays.binarySearch(chunkOffset, chunkIndex + 1, chunks.size(), pos);
+    }
+    if (chunkIndex < 0) {
+      // Binary search returns -insertionPoint - 1  if element is not present
+      // in the array. insertionPoint is the point at which element would be
+      // inserted in the sorted array. We need to adjust the chunkIndex
+      // accordingly so that chunkIndex = insertionPoint - 1
+      chunkIndex = -chunkIndex - 2;
+    }
+    // adjust chunkIndex so that readChunkFromContainer reads the correct chunk
+    chunkIndex -= 1;
+    readChunkFromContainer();
+    adjustBufferIndex(pos);
+  }
+
+  private void adjustBufferIndex(long pos) {
+    long tempOffset = chunkOffset[chunkIndex];
+    for (int i = 0; i < buffers.size(); i++) {
+      if (pos - tempOffset >= buffers.get(i).capacity()) {
+        tempOffset += buffers.get(i).capacity();
+      } else {
+        bufferIndex = i;
+        break;
+      }
+    }
+    buffers.get(bufferIndex).position((int) (pos - tempOffset));
+  }
+
+  @Override
+  public synchronized long getPos() throws IOException {
+    return chunkIndex == -1 ? 0 :
+        chunkOffset[chunkIndex] + buffers.get(bufferIndex).position();
+  }
+
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    return false;
+  }
+}
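For orientation, a read/seek sketch against the stream above; the chunk list, client objects, and traceID are assumed to come from an earlier GetKey call, as the class comment describes.

    // Sketch: key/chunks/clients are assumed inputs from the GetKey path.
    ChunkInputStream in = new ChunkInputStream(
        key, xceiverClientManager, xceiverClient, chunks, traceID);
    byte[] buf = new byte[4096];
    int n = in.read(buf, 0, buf.length);   // bulk read; may return fewer bytes
    in.seek(0);                            // back to the first chunk/buffer
    int first = in.read();                 // single-byte read path
    in.close();                            // releases the xceiver client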
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
new file mode 100644
index 0000000..b65df9f
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import com.google.protobuf.ByteString;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putKey;
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
+    .writeChunk;
+
+/**
+ * An {@link OutputStream} used by the REST service in combination with the
+ * SCMClient to write the value of a key to a sequence
+ * of container chunks.  Writes are buffered locally and periodically written to
+ * the container as a new chunk.  In order to preserve the semantics that
+ * replacement of a pre-existing key is atomic, each instance of the stream has
+ * an internal unique identifier.  This unique identifier and a monotonically
+ * increasing chunk index form a composite key that is used as the chunk name.
+ * After all data is written, a putKey call creates or updates the corresponding
+ * container key, and this call includes the full list of chunks that make up
+ * the key data.  The list of chunks is updated all at once.  Therefore, a
+ * concurrent reader never can see an intermediate state in which different
+ * chunks of data from different versions of the key data are interleaved.
+ * This class encapsulates all state management for buffering and writing
+ * through to the container.
+ */
+public class ChunkOutputStream extends OutputStream {
+
+  private final String containerKey;
+  private final String key;
+  private final String traceID;
+  private final KeyData.Builder containerKeyData;
+  private XceiverClientManager xceiverClientManager;
+  private XceiverClientSpi xceiverClient;
+  private ByteBuffer buffer;
+  private final String streamId;
+  private int chunkIndex;
+  private int chunkSize;
+
+  /**
+   * Creates a new ChunkOutputStream.
+   *
+   * @param containerKey container key
+   * @param key chunk key
+   * @param xceiverClientManager client manager that controls client
+   * @param xceiverClient client to perform container calls
+   * @param traceID container protocol call traceID
+   * @param chunkSize chunk size
+   */
+  public ChunkOutputStream(String containerKey, String key,
+      XceiverClientManager xceiverClientManager, XceiverClientSpi xceiverClient,
+      String traceID, int chunkSize) {
+    this.containerKey = containerKey;
+    this.key = key;
+    this.traceID = traceID;
+    this.chunkSize = chunkSize;
+    KeyValue keyValue = KeyValue.newBuilder()
+        .setKey("TYPE").setValue("KEY").build();
+    this.containerKeyData = KeyData.newBuilder()
+        .setContainerName(xceiverClient.getPipeline().getContainerName())
+        .setName(containerKey)
+        .addMetadata(keyValue);
+    this.xceiverClientManager = xceiverClientManager;
+    this.xceiverClient = xceiverClient;
+    this.buffer = ByteBuffer.allocate(chunkSize);
+    this.streamId = UUID.randomUUID().toString();
+    this.chunkIndex = 0;
+  }
+
+  @Override
+  public synchronized void write(int b) throws IOException {
+    checkOpen();
+    int rollbackPosition = buffer.position();
+    int rollbackLimit = buffer.limit();
+    buffer.put((byte)b);
+    if (buffer.position() == chunkSize) {
+      flushBufferToChunk(rollbackPosition, rollbackLimit);
+    }
+  }
+
+  @Override
+  public synchronized void write(byte[] b, int off, int len)
+      throws IOException {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if ((off < 0) || (off > b.length) || (len < 0) ||
+        ((off + len) > b.length) || ((off + len) < 0)) {
+      throw new IndexOutOfBoundsException();
+    }
+    if (len == 0) {
+      return;
+    }
+    checkOpen();
+    while (len > 0) {
+      int writeLen = Math.min(chunkSize - buffer.position(), len);
+      int rollbackPosition = buffer.position();
+      int rollbackLimit = buffer.limit();
+      buffer.put(b, off, writeLen);
+      if (buffer.position() == chunkSize) {
+        flushBufferToChunk(rollbackPosition, rollbackLimit);
+      }
+      off += writeLen;
+      len -= writeLen;
+    }
+  }
+
+  @Override
+  public synchronized void flush() throws IOException {
+    checkOpen();
+    if (buffer.position() > 0) {
+      int rollbackPosition = buffer.position();
+      int rollbackLimit = buffer.limit();
+      flushBufferToChunk(rollbackPosition, rollbackLimit);
+    }
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    if (xceiverClientManager != null && xceiverClient != null &&
+        buffer != null) {
+      try {
+        if (buffer.position() > 0) {
+          writeChunkToContainer();
+        }
+        putKey(xceiverClient, containerKeyData.build(), traceID);
+      } catch (IOException e) {
+        throw new IOException(
+            "Unexpected Storage Container Exception: " + e.toString(), e);
+      } finally {
+        xceiverClientManager.releaseClient(xceiverClient);
+        xceiverClientManager = null;
+        xceiverClient = null;
+        buffer = null;
+      }
+    }
+
+  }
+
+  /**
+   * Checks if the stream is open.  If not, throws an exception.
+   *
+   * @throws IOException if stream is closed
+   */
+  private synchronized void checkOpen() throws IOException {
+    if (xceiverClient == null) {
+      throw new IOException("ChunkOutputStream has been closed.");
+    }
+  }
+
+  /**
+   * Attempts to flush buffered writes by writing a new chunk to the container.
+   * If successful, then clears the buffer to prepare to receive writes for a
+   * new chunk.
+   *
+   * @param rollbackPosition position to restore in buffer if write fails
+   * @param rollbackLimit limit to restore in buffer if write fails
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  private synchronized void flushBufferToChunk(int rollbackPosition,
+      int rollbackLimit) throws IOException {
+    boolean success = false;
+    try {
+      writeChunkToContainer();
+      success = true;
+    } finally {
+      if (success) {
+        buffer.clear();
+      } else {
+        buffer.position(rollbackPosition);
+        buffer.limit(rollbackLimit);
+      }
+    }
+  }
+
+  /**
+   * Writes buffered data as a new chunk to the container and saves chunk
+   * information to be used later in putKey call.
+   *
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  private synchronized void writeChunkToContainer() throws IOException {
+    buffer.flip();
+    ByteString data = ByteString.copyFrom(buffer);
+    ChunkInfo chunk = ChunkInfo
+        .newBuilder()
+        .setChunkName(
+            DigestUtils.md5Hex(key) + "_stream_"
+                + streamId + "_chunk_" + ++chunkIndex)
+        .setOffset(0)
+        .setLen(data.size())
+        .build();
+    try {
+      writeChunk(xceiverClient, chunk, key, data, traceID);
+    } catch (IOException e) {
+      throw new IOException(
+          "Unexpected Storage Container Exception: " + e.toString(), e);
+    }
+    containerKeyData.addChunks(chunk);
+  }
+}
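
The write path above buffers bytes until a full chunk of chunkSize accumulates, writes that chunk to the container, and on failure restores the buffer's position and limit so no partially-written state leaks to the caller; close() flushes the remainder and then commits the key via putKey. The following standalone sketch (with a hypothetical BlobSink interface, not part of this patch) illustrates the same rollback-on-failure buffering idiom in isolation:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

/** Minimal illustration of the buffer-then-flush-with-rollback idiom used above. */
class BufferingSketch extends OutputStream {

  /** Hypothetical destination for full chunks. */
  interface BlobSink {
    void put(ByteBuffer chunk) throws IOException;
  }

  private final ByteBuffer buffer;
  private final BlobSink sink;

  BufferingSketch(BlobSink sink, int chunkSize) {
    this.sink = sink;
    this.buffer = ByteBuffer.allocate(chunkSize);
  }

  @Override
  public synchronized void write(int b) throws IOException {
    int rollbackPosition = buffer.position();
    int rollbackLimit = buffer.limit();
    buffer.put((byte) b);
    if (!buffer.hasRemaining()) {
      flushChunk(rollbackPosition, rollbackLimit);
    }
  }

  private void flushChunk(int rollbackPosition, int rollbackLimit)
      throws IOException {
    buffer.flip();
    try {
      sink.put(buffer.duplicate()); // write one full chunk
      buffer.clear();               // success: start collecting the next chunk
    } catch (IOException e) {
      buffer.limit(rollbackLimit);      // failure: restore caller-visible state
      buffer.position(rollbackPosition);
      throw e;
    }
  }
}
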
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
new file mode 100644
index 0000000..6e7ce94
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+/**
+ * Low level IO streams to upload/download chunks from container service.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..3571a89
--- /dev/null
+++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,21 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <Match>
+    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
+  </Match>
+</FindBugsFilter>
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
new file mode 100644
index 0000000..b81da96
--- /dev/null
+++ b/hadoop-hdds/common/pom.xml
@@ -0,0 +1,128 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdds</artifactId>
+    <version>0.2.1-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-hdds-common</artifactId>
+  <version>0.2.1-SNAPSHOT</version>
+  <description>Apache Hadoop Distributed Data Store Common</description>
+  <name>Apache HDDS Common</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.component>hdds</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.fusesource.leveldbjni</groupId>
+      <artifactId>leveldbjni-all</artifactId>
+    </dependency>
+
+    <dependency>
+      <artifactId>ratis-server</artifactId>
+      <groupId>org.apache.ratis</groupId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>io.dropwizard.metrics</groupId>
+          <artifactId>metrics-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <artifactId>ratis-netty</artifactId>
+      <groupId>org.apache.ratis</groupId>
+    </dependency>
+    <dependency>
+      <artifactId>ratis-grpc</artifactId>
+      <groupId>org.apache.ratis</groupId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.rocksdb</groupId>
+      <artifactId>rocksdbjni</artifactId>
+      <version>5.8.0</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
+        <executions>
+          <execution>
+            <id>compile-protoc</id>
+            <goals>
+              <goal>protoc</goal>
+            </goals>
+            <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
+              <imports>
+                <param>
+                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
+                </param>
+                <param>
+                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
+                </param>
+                <param>
+                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
+                </param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>StorageContainerLocationProtocol.proto</include>
+                  <include>DatanodeContainerProtocol.proto</include>
+                  <include>hdds.proto</include>
+                  <include>ScmBlockLocationProtocol.proto</include>
+                </includes>
+              </source>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
new file mode 100644
index 0000000..dec2c1c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds;
+
+public final class HddsConfigKeys {
+  private HddsConfigKeys() {
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
new file mode 100644
index 0000000..48c6dce
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -0,0 +1,318 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import com.google.common.net.HostAndPort;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.nio.file.Paths;
+import java.util.Collection;
+import java.util.HashSet;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+    .DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+    .DFS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
+
+/**
+ * HDDS specific stateless utility functions.
+ */
+public final class HddsUtils {
+
+
+  private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
+
+  /**
+   * The service ID of the solitary Ozone SCM service.
+   */
+  public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
+  public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
+      "OzoneScmServiceInstance";
+
+  private static final int NO_PORT = -1;
+
+  private HddsUtils() {
+  }
+
+  /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to the SCM.
+   *
+   * @param conf Configuration
+   * @return Target InetSocketAddress for the SCM client endpoint.
+   */
+  public static InetSocketAddress getScmAddressForClients(Configuration conf) {
+    final Optional<String> host = getHostNameFromConfigKeys(conf,
+        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
+
+    if (!host.isPresent()) {
+      throw new IllegalArgumentException(
+          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
+              + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
+              + "details"
+              + " on configuring Ozone.");
+    }
+
+    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
+        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
+
+    return NetUtils.createSocketAddr(host.get() + ":" + port
+        .or(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
+  }
+
+  /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to the SCM for block service. If
+   * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
+   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used.
+   *
+   * @param conf Configuration
+   * @return Target InetSocketAddress for the SCM block client endpoint.
+   * @throws IllegalArgumentException if configuration is not defined.
+   */
+  public static InetSocketAddress getScmAddressForBlockClients(
+      Configuration conf) {
+    Optional<String> host = getHostNameFromConfigKeys(conf,
+        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
+
+    if (!host.isPresent()) {
+      host = getHostNameFromConfigKeys(conf,
+          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
+      if (!host.isPresent()) {
+        throw new IllegalArgumentException(
+            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
+                + " must be defined. See"
+                + " https://wiki.apache.org/hadoop/Ozone#Configuration"
+                + " for details on configuring Ozone.");
+      }
+    }
+
+    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
+        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
+
+    return NetUtils.createSocketAddr(host.get() + ":" + port
+        .or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
+  }
+
+  /**
+   * Retrieve the hostname, trying the supplied config keys in order.
+   * Each config value may be absent, or, if present, in the format
+   * host:port (the :port part is optional).
+   *
+   * @param conf  - Conf
+   * @param keys a list of configuration key names.
+   *
+   * @return first hostname component found from the given keys, or absent.
+   * @throws IllegalArgumentException if any values are not in the 'host'
+   *             or host:port format.
+   */
+  public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
+      String... keys) {
+    for (final String key : keys) {
+      final String value = conf.getTrimmed(key);
+      final Optional<String> hostName = getHostName(value);
+      if (hostName.isPresent()) {
+        return hostName;
+      }
+    }
+    return Optional.absent();
+  }
+
+  /**
+   * Gets the hostname or indicates that it is absent.
+   * @param value host or host:port
+   * @return hostname
+   */
+  public static Optional<String> getHostName(String value) {
+    if ((value == null) || value.isEmpty()) {
+      return Optional.absent();
+    }
+    return Optional.of(HostAndPort.fromString(value).getHostText());
+  }
+
+  /**
+   * Gets the port if there is one, or absent otherwise.
+   * @param value  String in host:port format.
+   * @return Port number, or absent if no port is specified.
+   */
+  public static Optional<Integer> getHostPort(String value) {
+    if ((value == null) || value.isEmpty()) {
+      return Optional.absent();
+    }
+    int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT);
+    if (port == NO_PORT) {
+      return Optional.absent();
+    } else {
+      return Optional.of(port);
+    }
+  }
+
+  /**
+   * Retrieve the port number, trying the supplied config keys in order.
+   * Each config value may be absent, or, if present, in the format
+   * host:port (the :port part is optional).
+   *
+   * @param conf Conf
+   * @param keys a list of configuration key names.
+   *
+   * @return first port number component found from the given keys, or absent.
+   * @throws IllegalArgumentException if any values are not in the 'host'
+   *             or host:port format.
+   */
+  public static Optional<Integer> getPortNumberFromConfigKeys(
+      Configuration conf, String... keys) {
+    for (final String key : keys) {
+      final String value = conf.getTrimmed(key);
+      final Optional<Integer> hostPort = getHostPort(value);
+      if (hostPort.isPresent()) {
+        return hostPort;
+      }
+    }
+    return Optional.absent();
+  }
+
+  /**
+   * Retrieve the socket addresses of all storage container managers.
+   *
+   * @param conf Configuration
+   * @return A collection of SCM addresses
+   * @throws IllegalArgumentException If the configuration is invalid
+   */
+  public static Collection<InetSocketAddress> getSCMAddresses(
+      Configuration conf) throws IllegalArgumentException {
+    Collection<InetSocketAddress> addresses =
+        new HashSet<InetSocketAddress>();
+    Collection<String> names =
+        conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
+    if (names == null || names.isEmpty()) {
+      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES
+          + " need to be a set of valid DNS names or IP addresses."
+          + " Null or empty address list found.");
+    }
+
+    final Optional<Integer> defaultPort =
+        Optional.of(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT);
+    for (String address : names) {
+      Optional<String> hostname = getHostName(address);
+      if (!hostname.isPresent()) {
+        throw new IllegalArgumentException("Invalid hostname for SCM: "
+            + address);
+      }
+      Optional<Integer> port = getHostPort(address);
+      InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(),
+          port.or(defaultPort.get()));
+      addresses.add(addr);
+    }
+    return addresses;
+  }
+
+  public static boolean isHddsEnabled(Configuration conf) {
+    String securityEnabled =
+        conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+            "simple");
+    boolean securityAuthorizationEnabled = conf.getBoolean(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
+
+    if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
+      LOG.error("Ozone is not supported in a security enabled cluster. ");
+      return false;
+    } else {
+      return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
+    }
+  }
+
+
+  /**
+   * Get the path for datanode id file.
+   *
+   * @param conf - Configuration
+   * @return the path of datanode id as string
+   */
+  public static String getDatanodeIdFilePath(Configuration conf) {
+    String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+    if (dataNodeIDPath == null) {
+      String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+      if (Strings.isNullOrEmpty(metaPath)) {
+        // The metadata directory is not configured. In theory this should
+        // not happen here because startup should have failed earlier.
+        throw new IllegalArgumentException("Unable to locate meta data " +
+            "directory when getting datanode id path");
+      }
+      dataNodeIDPath = Paths.get(metaPath,
+          ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
+    }
+    return dataNodeIDPath;
+  }
+
+  /**
+   * Returns the hostname for this datanode. If the hostname is not
+   * explicitly configured in the given config, then it is determined
+   * via the DNS class.
+   *
+   * @param conf Configuration
+   *
+   * @return the hostname (NB: may not be a FQDN)
+   * @throws UnknownHostException if the dfs.datanode.dns.interface
+   *    option is used and the hostname can not be determined
+   */
+  public static String getHostName(Configuration conf)
+      throws UnknownHostException {
+    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
+    if (name == null) {
+      String dnsInterface = conf.get(
+          CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
+      String nameServer = conf.get(
+          CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
+      boolean fallbackToHosts = false;
+
+      if (dnsInterface == null) {
+        // Try the legacy configuration keys.
+        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
+        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
+      } else {
+        // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
+        // resolution if DNS fails. We will not use hosts file resolution
+        // by default to avoid breaking existing clusters.
+        fallbackToHosts = true;
+      }
+
+      name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
+    }
+    return name;
+  }
+
+}
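
As a usage sketch of the address helpers above (the hostnames and ports are placeholders; the config key is referenced via its ScmConfigKeys constant rather than a literal), resolving the SCM client endpoint could look like this:

import java.net.InetSocketAddress;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class ScmAddressExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Host only: the port falls back to OZONE_SCM_CLIENT_PORT_DEFAULT.
    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "scm.example.com");
    InetSocketAddress addr = HddsUtils.getScmAddressForClients(conf);
    System.out.println(addr);

    // Explicit host:port overrides the default port.
    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "scm.example.com:10200");
    System.out.println(HddsUtils.getScmAddressForClients(conf));
  }
}
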
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
new file mode 100644
index 0000000..59708a9
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+
+
+/**
+ * Represents an OzoneQuota object that can be applied to
+ * a storage volume.
+ */
+public class OzoneQuota {
+
+  public static final String OZONE_QUOTA_BYTES = "BYTES";
+  public static final String OZONE_QUOTA_MB = "MB";
+  public static final String OZONE_QUOTA_GB = "GB";
+  public static final String OZONE_QUOTA_TB = "TB";
+
+  private Units unit;
+  private long size;
+
+  /** Quota Units.*/
+  public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB}
+
+  /**
+   * Returns size.
+   *
+   * @return long
+   */
+  public long getSize() {
+    return size;
+  }
+
+  /**
+   * Returns Units.
+   *
+   * @return quota unit (BYTES, KB, MB, GB, TB or UNDEFINED)
+   */
+  public Units getUnit() {
+    return unit;
+  }
+
+  /**
+   * Constructs a default Quota object.
+   */
+  public OzoneQuota() {
+    this.size = 0;
+    this.unit = Units.UNDEFINED;
+  }
+
+  /**
+   * Constructor for Ozone Quota.
+   *
+   * @param size Long Size
+   * @param unit BYTES, MB, GB or TB
+   */
+  public OzoneQuota(long size, Units unit) {
+    this.size = size;
+    this.unit = unit;
+  }
+
+  /**
+   * Formats a quota as a string.
+   *
+   * @param quota the quota to format
+   * @return string representation of quota
+   */
+  public static String formatQuota(OzoneQuota quota) {
+    return String.valueOf(quota.size) + quota.unit;
+  }
+
+  /**
+   * Parses a user provided string and returns the
+   * Quota Object.
+   *
+   * @param quotaString Quota String
+   *
+   * @return OzoneQuota object
+   *
+   * @throws IllegalArgumentException if the quota string is null, empty,
+   *           or does not end with a supported unit
+   */
+  public static OzoneQuota parseQuota(String quotaString)
+      throws IllegalArgumentException {
+
+    if ((quotaString == null) || (quotaString.isEmpty())) {
+      throw new IllegalArgumentException(
+          "Quota string cannot be null or empty.");
+    }
+
+    String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
+    String size = "";
+    int nSize;
+    Units currUnit = Units.MB;
+    boolean found = false;
+    if (uppercase.endsWith(OZONE_QUOTA_MB)) {
+      size = uppercase
+          .substring(0, uppercase.length() - OZONE_QUOTA_MB.length());
+      currUnit = Units.MB;
+      found = true;
+    }
+
+    if (uppercase.endsWith(OZONE_QUOTA_GB)) {
+      size = uppercase
+          .substring(0, uppercase.length() - OZONE_QUOTA_GB.length());
+      currUnit = Units.GB;
+      found = true;
+    }
+
+    if (uppercase.endsWith(OZONE_QUOTA_TB)) {
+      size = uppercase
+          .substring(0, uppercase.length() - OZONE_QUOTA_TB.length());
+      currUnit = Units.TB;
+      found = true;
+    }
+
+    if (uppercase.endsWith(OZONE_QUOTA_BYTES)) {
+      size = uppercase
+          .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length());
+      currUnit = Units.BYTES;
+      found = true;
+    }
+
+    if (!found) {
+      throw new IllegalArgumentException(
+          "Quota unit not recognized. Supported values are BYTES, MB, GB and " +
+              "TB.");
+    }
+
+    nSize = Integer.parseInt(size);
+    if (nSize < 0) {
+      throw new IllegalArgumentException("Quota cannot be negative.");
+    }
+
+    return new OzoneQuota(nSize, currUnit);
+  }
+
+
+  /**
+   * Returns size in Bytes or -1 if there is no Quota.
+   */
+  public long sizeInBytes() {
+    switch (this.unit) {
+    case BYTES:
+      return this.getSize();
+    case MB:
+      return this.getSize() * OzoneConsts.MB;
+    case GB:
+      return this.getSize() * OzoneConsts.GB;
+    case TB:
+      return this.getSize() * OzoneConsts.TB;
+    case UNDEFINED:
+    default:
+      return -1;
+    }
+  }
+
+  /**
+   * Returns OzoneQuota corresponding to size in bytes.
+   *
+   * @param sizeInBytes size in bytes to be converted
+   *
+   * @return OzoneQuota object
+   */
+  public static OzoneQuota getOzoneQuota(long sizeInBytes) {
+    long size;
+    Units unit;
+    if (sizeInBytes % OzoneConsts.TB == 0) {
+      size = sizeInBytes / OzoneConsts.TB;
+      unit = Units.TB;
+    } else if (sizeInBytes % OzoneConsts.GB == 0) {
+      size = sizeInBytes / OzoneConsts.GB;
+      unit = Units.GB;
+    } else if (sizeInBytes % OzoneConsts.MB == 0) {
+      size = sizeInBytes / OzoneConsts.MB;
+      unit = Units.MB;
+    } else {
+      size = sizeInBytes;
+      unit = Units.BYTES;
+    }
+    return new OzoneQuota((int)size, unit);
+  }
+
+  @Override
+  public String toString() {
+    return size + " " + unit;
+  }
+}
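
A brief usage sketch of the parsing and conversion helpers above; the multipliers come from OzoneConsts, and the quota value is arbitrary:

import org.apache.hadoop.hdds.client.OzoneQuota;

public class QuotaExample {
  public static void main(String[] args) {
    // Whitespace is stripped and the unit is case-insensitive: "100 gb" -> 100 GB.
    OzoneQuota quota = OzoneQuota.parseQuota("100 gb");
    System.out.println(quota.getSize() + " " + quota.getUnit()); // 100 GB
    System.out.println(quota.sizeInBytes());                     // 100 * OzoneConsts.GB

    // Map a raw byte count back to the largest unit that divides it evenly.
    System.out.println(OzoneQuota.getOzoneQuota(quota.sizeInBytes())); // 100 GB
  }
}
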
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
new file mode 100644
index 0000000..0215964
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+/**
+ * The replication factor to be used while writing key into ozone.
+ */
+public enum ReplicationFactor {
+  ONE(1),
+  THREE(3);
+
+  /**
+   * Integer representation of replication.
+   */
+  private int value;
+
+  /**
+   * Initializes ReplicationFactor with value.
+   * @param value replication value
+   */
+  ReplicationFactor(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Returns enum value corresponding to the int value.
+   * @param value replication value
+   * @return ReplicationFactor
+   */
+  public static ReplicationFactor valueOf(int value) {
+    if(value == 1) {
+      return ONE;
+    }
+    if (value == 3) {
+      return THREE;
+    }
+    throw new IllegalArgumentException("Unsupported value: " + value);
+  }
+
+  /**
+   * Returns integer representation of ReplicationFactor.
+   * @return replication value
+   */
+  public int getValue() {
+    return value;
+  }
+}
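
The integer overload complements the enum's built-in valueOf(String); a quick sketch of the mapping:

import org.apache.hadoop.hdds.client.ReplicationFactor;

public class ReplicationFactorExample {
  public static void main(String[] args) {
    ReplicationFactor factor = ReplicationFactor.valueOf(3);
    System.out.println(factor + " -> " + factor.getValue()); // THREE -> 3

    // Anything other than 1 or 3 is rejected.
    try {
      ReplicationFactor.valueOf(2);
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage()); // Unsupported value: 2
    }
  }
}
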
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
new file mode 100644
index 0000000..259a1a2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+/**
+ * The replication type to be used while writing key into ozone.
+ */
+public enum ReplicationType {
+    RATIS,
+    STAND_ALONE,
+    CHAINED
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
new file mode 100644
index 0000000..e81f134
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+/**
+ * Base property types for HDDS containers and replications.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
new file mode 100644
index 0000000..b8d0b24
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import com.google.gson.Gson;
+import java.io.IOException;
+import java.io.Writer;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.HttpServer2;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A servlet to print out the running configuration data.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public class HddsConfServlet extends HttpServlet {
+
+  private static final long serialVersionUID = 1L;
+
+  protected static final String FORMAT_JSON = "json";
+  protected static final String FORMAT_XML = "xml";
+  private static final String COMMAND = "cmd";
+  private static final OzoneConfiguration OZONE_CONFIG =
+      new OzoneConfiguration();
+  private static final transient Logger LOG =
+      LoggerFactory.getLogger(HddsConfServlet.class);
+
+
+  /**
+   * Return the Configuration of the daemon hosting this servlet.
+   * This is populated when the HttpServer starts.
+   */
+  private Configuration getConfFromContext() {
+    Configuration conf = (Configuration) getServletContext().getAttribute(
+        HttpServer2.CONF_CONTEXT_ATTRIBUTE);
+    assert conf != null;
+    return conf;
+  }
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws ServletException, IOException {
+
+    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
+        request, response)) {
+      return;
+    }
+
+    String format = parseAcceptHeader(request);
+    if (FORMAT_XML.equals(format)) {
+      response.setContentType("text/xml; charset=utf-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=utf-8");
+    }
+
+    String name = request.getParameter("name");
+    Writer out = response.getWriter();
+    String cmd = request.getParameter(COMMAND);
+
+    processCommand(cmd, format, request, response, out, name);
+    out.close();
+  }
+
+  private void processCommand(String cmd, String format,
+      HttpServletRequest request, HttpServletResponse response, Writer out,
+      String name)
+      throws IOException {
+    try {
+      if (cmd == null) {
+        if (FORMAT_XML.equals(format)) {
+          response.setContentType("text/xml; charset=utf-8");
+        } else if (FORMAT_JSON.equals(format)) {
+          response.setContentType("application/json; charset=utf-8");
+        }
+
+        writeResponse(getConfFromContext(), out, format, name);
+      } else {
+        processConfigTagRequest(request, out);
+      }
+    } catch (BadFormatException bfe) {
+      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
+    } catch (IllegalArgumentException iae) {
+      response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage());
+    }
+  }
+
+  @VisibleForTesting
+  static String parseAcceptHeader(HttpServletRequest request) {
+    String format = request.getHeader(HttpHeaders.ACCEPT);
+    return format != null && format.contains(FORMAT_JSON) ?
+        FORMAT_JSON : FORMAT_XML;
+  }
+
+  /**
+   * Guts of the servlet - extracted for easy testing.
+   */
+  static void writeResponse(Configuration conf,
+      Writer out, String format, String propertyName)
+      throws IOException, IllegalArgumentException, BadFormatException {
+    if (FORMAT_JSON.equals(format)) {
+      Configuration.dumpConfiguration(conf, propertyName, out);
+    } else if (FORMAT_XML.equals(format)) {
+      conf.writeXml(propertyName, out);
+    } else {
+      throw new BadFormatException("Bad format: " + format);
+    }
+  }
+
+  public static class BadFormatException extends Exception {
+
+    private static final long serialVersionUID = 1L;
+
+    public BadFormatException(String msg) {
+      super(msg);
+    }
+  }
+
+  private void processConfigTagRequest(HttpServletRequest request,
+      Writer out) throws IOException {
+    String cmd = request.getParameter(COMMAND);
+    Gson gson = new Gson();
+    Configuration config = getOzoneConfig();
+
+    switch (cmd) {
+    case "getOzoneTags":
+      out.write(gson.toJson(config.get("ozone.system.tags").split(",")));
+      break;
+    case "getPropertyByTag":
+      String tags = request.getParameter("tags");
+      Map<String, Properties> propMap = new HashMap<>();
+
+      for (String tag : tags.split(",")) {
+        if (config.isPropertyTag(tag)) {
+          Properties properties = config.getAllPropertiesByTag(tag);
+          propMap.put(tag, properties);
+        } else {
+          LOG.debug("Not a valid tag" + tag);
+        }
+      }
+      out.write(gson.toJsonTree(propMap).toString());
+      break;
+    default:
+      throw new IllegalArgumentException(cmd + " is not a valid command.");
+    }
+
+  }
+
+  private static Configuration getOzoneConfig() {
+    return OZONE_CONFIG;
+  }
+}
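
The servlet returns the full running configuration when no cmd parameter is given, choosing XML or JSON from the Accept header, and serves tag views via cmd=getOzoneTags and cmd=getPropertyByTag&tags=... . A hedged client-side sketch follows; it assumes the servlet is registered at /conf on the daemon's HTTP server and uses a placeholder host and port:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class HddsConfClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; the real address depends on the daemon's HTTP settings.
    URL url = new URL("http://scm.example.com:9876/conf");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json"); // default is XML
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}
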
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
new file mode 100644
index 0000000..f07718c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Configuration for ozone.
+ */
+@InterfaceAudience.Private
+public class OzoneConfiguration extends Configuration {
+  static {
+    activate();
+  }
+
+  public OzoneConfiguration() {
+    OzoneConfiguration.activate();
+  }
+
+  public OzoneConfiguration(Configuration conf) {
+    super(conf);
+  }
+
+  public List<Property> readPropertyFromXml(URL url) throws JAXBException {
+    JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class);
+    Unmarshaller um = context.createUnmarshaller();
+
+    XMLConfiguration config = (XMLConfiguration) um.unmarshal(url);
+    return config.getProperties();
+  }
+
+  /**
+   * Class to marshall/un-marshall configuration from xml files.
+   */
+  @XmlAccessorType(XmlAccessType.FIELD)
+  @XmlRootElement(name = "configuration")
+  public static class XMLConfiguration {
+
+    @XmlElement(name = "property", type = Property.class)
+    private List<Property> properties = new ArrayList<>();
+
+    public XMLConfiguration() {
+    }
+
+    public XMLConfiguration(List<Property> properties) {
+      this.properties = properties;
+    }
+
+    public List<Property> getProperties() {
+      return properties;
+    }
+
+    public void setProperties(List<Property> properties) {
+      this.properties = properties;
+    }
+  }
+
+  /**
+   * Class to marshall/un-marshall configuration properties from xml files.
+   */
+  @XmlAccessorType(XmlAccessType.FIELD)
+  @XmlRootElement(name = "property")
+  public static class Property implements Comparable<Property> {
+
+    private String name;
+    private String value;
+    private String tag;
+    private String description;
+
+    public String getName() {
+      return name;
+    }
+
+    public void setName(String name) {
+      this.name = name;
+    }
+
+    public String getValue() {
+      return value;
+    }
+
+    public void setValue(String value) {
+      this.value = value;
+    }
+
+    public String getTag() {
+      return tag;
+    }
+
+    public void setTag(String tag) {
+      this.tag = tag;
+    }
+
+    public String getDescription() {
+      return description;
+    }
+
+    public void setDescription(String description) {
+      this.description = description;
+    }
+
+    @Override
+    public int compareTo(Property o) {
+      if (this == o) {
+        return 0;
+      }
+      return this.getName().compareTo(o.getName());
+    }
+
+    @Override
+    public String toString() {
+      return this.getName() + " " + this.getValue() + " " + this.getTag();
+    }
+
+    @Override
+    public int hashCode() {
+      return this.getName().hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      return (obj instanceof Property) && (((Property) obj).getName())
+          .equals(this.getName());
+    }
+  }
+
+  public static void activate() {
+    // adds the default resources
+    Configuration.addDefaultResource("hdfs-default.xml");
+    Configuration.addDefaultResource("hdfs-site.xml");
+    Configuration.addDefaultResource("ozone-default.xml");
+    Configuration.addDefaultResource("ozone-site.xml");
+  }
+}
\ No newline at end of file
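
A short usage sketch, assuming ozone-default.xml is available on the classpath: the constructor layers the Ozone resources on top of the HDFS ones, and readPropertyFromXml un-marshals raw property entries (name, value, tag, description) for tooling:

import java.net.URL;
import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public class OzoneConfExample {
  public static void main(String[] args) throws Exception {
    // Picks up ozone-default.xml and ozone-site.xml in addition to hdfs-*.xml.
    OzoneConfiguration conf = new OzoneConfiguration();
    System.out.println(conf.getBoolean(OzoneConfigKeys.OZONE_ENABLED,
        OzoneConfigKeys.OZONE_ENABLED_DEFAULT));

    // Un-marshal <property> entries from a configuration file on the classpath.
    URL url = OzoneConfExample.class.getClassLoader()
        .getResource("ozone-default.xml");
    if (url != null) {
      List<OzoneConfiguration.Property> props = conf.readPropertyFromXml(url);
      for (OzoneConfiguration.Property p
          : props.subList(0, Math.min(3, props.size()))) {
        System.out.println(p.getName() + " = " + p.getValue());
      }
    }
  }
}
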
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
new file mode 100644
index 0000000..948057e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
new file mode 100644
index 0000000..f8894e6
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds;
+
+/**
+ * Generic HDDS specific configurator and helper classes.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
new file mode 100644
index 0000000..b2fa291
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -0,0 +1,353 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.protocol;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.util.UUID;
+
+/**
+ * DatanodeDetails contains details about a DataNode, such as:
+ * - UUID of the DataNode.
+ * - IP address and hostname.
+ * - Ports on which the DataNode will be listening.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class DatanodeDetails implements Comparable<DatanodeDetails> {
+
+  /**
+   * DataNode's unique identifier in the cluster.
+   */
+  private final UUID uuid;
+
+  private String ipAddress;
+  private String hostName;
+  private Integer containerPort;
+  private Integer ratisPort;
+  private Integer ozoneRestPort;
+
+
+  /**
+   * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used
+   * for instantiating DatanodeDetails.
+   * @param uuid DataNode's UUID
+   * @param ipAddress IP Address of this DataNode
+   * @param hostName DataNode's hostname
+   * @param containerPort Container Port
+   * @param ratisPort Ratis Port
+   * @param ozoneRestPort Rest Port
+   */
+  private DatanodeDetails(String uuid, String ipAddress, String hostName,
+      Integer containerPort, Integer ratisPort, Integer ozoneRestPort) {
+    this.uuid = UUID.fromString(uuid);
+    this.ipAddress = ipAddress;
+    this.hostName = hostName;
+    this.containerPort = containerPort;
+    this.ratisPort = ratisPort;
+    this.ozoneRestPort = ozoneRestPort;
+  }
+
+  /**
+   * Returns the DataNode UUID.
+   *
+   * @return UUID of DataNode
+   */
+  public UUID getUuid() {
+    return uuid;
+  }
+
+  /**
+   * Returns the string representation of DataNode UUID.
+   *
+   * @return UUID of DataNode
+   */
+  public String getUuidString() {
+    return uuid.toString();
+  }
+
+  /**
+   * Sets the IP address of Datanode.
+   *
+   * @param ip IP Address
+   */
+  public void setIpAddress(String ip) {
+    this.ipAddress = ip;
+  }
+
+  /**
+   * Returns IP address of DataNode.
+   *
+   * @return IP address
+   */
+  public String getIpAddress() {
+    return ipAddress;
+  }
+
+  /**
+   * Sets the Datanode hostname.
+   *
+   * @param host hostname
+   */
+  public void setHostName(String host) {
+    this.hostName = host;
+  }
+
+  /**
+   * Returns Hostname of DataNode.
+   *
+   * @return Hostname
+   */
+  public String getHostName() {
+    return hostName;
+  }
+
+  /**
+   * Sets the Container Port.
+   * @param port ContainerPort
+   */
+  public void setContainerPort(int port) {
+    containerPort = port;
+  }
+
+  /**
+   * Returns standalone container Port.
+   *
+   * @return Container Port
+   */
+  public int getContainerPort() {
+    return containerPort;
+  }
+
+  /**
+   * Sets Ratis Port.
+   * @param port RatisPort
+   */
+  public void setRatisPort(int port) {
+    ratisPort = port;
+  }
+
+
+  /**
+   * Returns Ratis Port.
+   * @return Ratis Port
+   */
+  public int getRatisPort() {
+    return ratisPort;
+  }
+
+
+  /**
+   * Sets OzoneRestPort.
+   * @param port OzoneRestPort
+   */
+  public void setOzoneRestPort(int port) {
+    ozoneRestPort = port;
+  }
+
+  /**
+   * Returns Ozone Rest Port.
+   * @return OzoneRestPort
+   */
+  public int getOzoneRestPort() {
+    return ozoneRestPort;
+  }
+
+  /**
+   * Returns a DatanodeDetails from the protocol buffers.
+   *
+   * @param datanodeDetailsProto - protoBuf Message
+   * @return DatanodeDetails
+   */
+  public static DatanodeDetails getFromProtoBuf(
+      HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
+    DatanodeDetails.Builder builder = newBuilder();
+    builder.setUuid(datanodeDetailsProto.getUuid());
+    if (datanodeDetailsProto.hasIpAddress()) {
+      builder.setIpAddress(datanodeDetailsProto.getIpAddress());
+    }
+    if (datanodeDetailsProto.hasHostName()) {
+      builder.setHostName(datanodeDetailsProto.getHostName());
+    }
+    if (datanodeDetailsProto.hasContainerPort()) {
+      builder.setContainerPort(datanodeDetailsProto.getContainerPort());
+    }
+    if (datanodeDetailsProto.hasRatisPort()) {
+      builder.setRatisPort(datanodeDetailsProto.getRatisPort());
+    }
+    if (datanodeDetailsProto.hasOzoneRestPort()) {
+      builder.setOzoneRestPort(datanodeDetailsProto.getOzoneRestPort());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Returns a DatanodeDetails protobuf message from a datanode ID.
+   * @return HddsProtos.DatanodeDetailsProto
+   */
+  public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
+    HddsProtos.DatanodeDetailsProto.Builder builder =
+        HddsProtos.DatanodeDetailsProto.newBuilder()
+            .setUuid(getUuidString());
+    if (ipAddress != null) {
+      builder.setIpAddress(ipAddress);
+    }
+    if (hostName != null) {
+      builder.setHostName(hostName);
+    }
+    if (containerPort != null) {
+      builder.setContainerPort(containerPort);
+    }
+    if (ratisPort != null) {
+      builder.setRatisPort(ratisPort);
+    }
+    if (ozoneRestPort != null) {
+      builder.setOzoneRestPort(ozoneRestPort);
+    }
+    return builder.build();
+  }
+
+  @Override
+  public String toString() {
+    return uuid.toString() + "{" +
+        "ip: " +
+        ipAddress +
+        ", host: " +
+        hostName +
+        "}";
+  }
+
+  @Override
+  public int compareTo(DatanodeDetails that) {
+    return this.getUuid().compareTo(that.getUuid());
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return obj instanceof DatanodeDetails &&
+        uuid.equals(((DatanodeDetails) obj).uuid);
+  }
+
+  @Override
+  public int hashCode() {
+    return uuid.hashCode();
+  }
+
+  /**
+   * Returns DatanodeDetails.Builder instance.
+   *
+   * @return DatanodeDetails.Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder class for building DatanodeDetails.
+   */
+  public static class Builder {
+    private String id;
+    private String ipAddress;
+    private String hostName;
+    private Integer containerPort;
+    private Integer ratisPort;
+    private Integer ozoneRestPort;
+
+    /**
+     * Sets the DatanodeUuid.
+     *
+     * @param uuid DatanodeUuid
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setUuid(String uuid) {
+      this.id = uuid;
+      return this;
+    }
+
+    /**
+     * Sets the IP address of DataNode.
+     *
+     * @param ip address
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setIpAddress(String ip) {
+      this.ipAddress = ip;
+      return this;
+    }
+
+    /**
+     * Sets the hostname of DataNode.
+     *
+     * @param host hostname
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setHostName(String host) {
+      this.hostName = host;
+      return this;
+    }
+    /**
+     * Sets the ContainerPort.
+     *
+     * @param port ContainerPort
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setContainerPort(Integer port) {
+      this.containerPort = port;
+      return this;
+    }
+
+    /**
+     * Sets the RatisPort.
+     *
+     * @param port RatisPort
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setRatisPort(Integer port) {
+      this.ratisPort = port;
+      return this;
+    }
+
+    /**
+     * Sets the OzoneRestPort.
+     *
+     * @param port OzoneRestPort
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setOzoneRestPort(Integer port) {
+      this.ozoneRestPort = port;
+      return this;
+    }
+
+    /**
+     * Builds and returns DatanodeDetails instance.
+     *
+     * @return DatanodeDetails
+     */
+    public DatanodeDetails build() {
+      Preconditions.checkNotNull(id);
+      return new DatanodeDetails(id, ipAddress, hostName, containerPort,
+          ratisPort, ozoneRestPort);
+    }
+
+  }
+
+}
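
A builder/protobuf round-trip sketch for the class above (the UUID is random and the addresses and ports are made-up values):

import java.util.UUID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class DatanodeDetailsExample {
  public static void main(String[] args) {
    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setIpAddress("10.0.0.7")          // made-up address
        .setHostName("dn1.example.com")    // made-up hostname
        .setContainerPort(9859)            // example ports only
        .setRatisPort(9858)
        .setOzoneRestPort(9880)
        .build();

    // Serialize to the wire form and back; equality is based solely on the UUID.
    HddsProtos.DatanodeDetailsProto proto = dn.getProtoBufMessage();
    DatanodeDetails copy = DatanodeDetails.getFromProtoBuf(proto);
    System.out.println(dn.equals(copy)); // true
    System.out.println(copy);            // <uuid>{ip: 10.0.0.7, host: dn1.example.com}
  }
}
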
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
new file mode 100644
index 0000000..7dae0fc
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains HDDS protocol related classes.
+ */
+package org.apache.hadoop.hdds.protocol;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
new file mode 100644
index 0000000..7f40ab2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class contains constants for configuration keys used in SCM.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public final class ScmConfigKeys {
+
+  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
+      "scm.container.client.idle.threshold";
+  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
+      "10s";
+
+  public static final String SCM_CONTAINER_CLIENT_MAX_SIZE_KEY =
+      "scm.container.client.max.size";
+  public static final int SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT =
+      256;
+
+  public static final String SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS =
+      "scm.container.client.max.outstanding.requests";
+  public static final int SCM_CONTAINER_CLIENT_MAX_OUTSTANDING_REQUESTS_DEFAULT
+      = 100;
+
+  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
+      = "dfs.container.ratis.enabled";
+  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
+      = false;
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
+      = "dfs.container.ratis.rpc.type";
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
+      = "GRPC";
+  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
+      = "dfs.container.ratis.num.write.chunk.threads";
+  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
+      = 60;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
+      "dfs.container.ratis.segment.size";
+  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
+      1 * 1024 * 1024 * 1024;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
+      "dfs.container.ratis.segment.preallocated.size";
+  public static final int
+      DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 1024;
+
+  // TODO : this is copied from OzoneConsts, may need to move to a better place
+  public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
+  // 16 MB by default
+  public static final int OZONE_SCM_CHUNK_SIZE_DEFAULT = 16 * 1024 * 1024;
+  public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024;
+
+  public static final String OZONE_SCM_CLIENT_PORT_KEY =
+      "ozone.scm.client.port";
+  public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860;
+
+  public static final String OZONE_SCM_DATANODE_PORT_KEY =
+      "ozone.scm.datanode.port";
+  public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
+
+  // OZONE_KSM_PORT_DEFAULT = 9862
+  public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
+      "ozone.scm.block.client.port";
+  public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;
+
+  // Container service client
+  public static final String OZONE_SCM_CLIENT_ADDRESS_KEY =
+      "ozone.scm.client.address";
+  public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY =
+      "ozone.scm.client.bind.host";
+  public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT =
+      "0.0.0.0";
+
+  // Block service client
+  public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY =
+      "ozone.scm.block.client.address";
+  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY =
+      "ozone.scm.block.client.bind.host";
+  public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT =
+      "0.0.0.0";
+
+  public static final String OZONE_SCM_DATANODE_ADDRESS_KEY =
+      "ozone.scm.datanode.address";
+  public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY =
+      "ozone.scm.datanode.bind.host";
+  public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT =
+      "0.0.0.0";
+
+  public static final String OZONE_SCM_HTTP_ENABLED_KEY =
+      "ozone.scm.http.enabled";
+  public static final String OZONE_SCM_HTTP_BIND_HOST_KEY =
+      "ozone.scm.http-bind-host";
+  public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY =
+      "ozone.scm.https-bind-host";
+  public static final String OZONE_SCM_HTTP_ADDRESS_KEY =
+      "ozone.scm.http-address";
+  public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
+      "ozone.scm.https-address";
+  public static final String OZONE_SCM_KEYTAB_FILE =
+      "ozone.scm.keytab.file";
+  public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
+  public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
+  public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
+
+  public static final String HDDS_REST_HTTP_ADDRESS_KEY =
+      "hdds.rest.http-address";
+  public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
+  public static final String HDDS_REST_CSRF_ENABLED_KEY =
+      "hdds.rest.rest-csrf.enabled";
+  public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
+  public static final String HDDS_REST_NETTY_HIGH_WATERMARK =
+      "hdds.rest.netty.high.watermark";
+  public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536;
+  public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768;
+  public static final String HDDS_REST_NETTY_LOW_WATERMARK =
+      "hdds.rest.netty.low.watermark";
+
+  public static final String OZONE_SCM_HANDLER_COUNT_KEY =
+      "ozone.scm.handler.count.key";
+  public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
+
+  public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
+      "ozone.scm.heartbeat.interval";
+  public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
+      "30s";
+
+  public static final String OZONE_SCM_DEADNODE_INTERVAL =
+      "ozone.scm.dead.node.interval";
+  public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
+      "10m";
+
+  public static final String OZONE_SCM_MAX_HB_COUNT_TO_PROCESS =
+      "ozone.scm.max.hb.count.to.process";
+  public static final int OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT = 5000;
+
+  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
+      "ozone.scm.heartbeat.thread.interval";
+  public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =
+      "3s";
+
+  public static final String OZONE_SCM_STALENODE_INTERVAL =
+      "ozone.scm.stale.node.interval";
+  public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
+      "90s";
+
+  public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
+      "ozone.scm.heartbeat.rpc-timeout";
+  public static final long OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT =
+      1000;
+
+  /**
+   * Defines how frequently we log missed heartbeats to a specific SCM. By
+   * default a warning message is written for every 10 consecutive heartbeats
+   * missed to a specific SCM. This avoids flooding the log with
+   * missed-heartbeat statements.
+   */
+  public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT =
+      "ozone.scm.heartbeat.log.warn.interval.count";
+  public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT =
+      10;
+
+  // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT.
+  // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777
+  //
+  // If this key is not specified datanodes will not be able to find
+  // SCM. The SCM membership can be dynamic, so this key should contain
+  // all possible SCM names. Once the SCM leader is discovered datanodes will
+  // get the right list of SCMs to heartbeat to from the leader.
+  // While it is good for the datanodes to know the names of all SCM nodes,
+  // it is sufficient to actually know the name of one working SCM. That SCM
+  // will be able to return the information about other SCMs that are part of
+  // the SCM replicated Log.
+  //
+  // In case of a membership change, any one of the SCM machines will be
+  // able to send back a new list to the datanodes.
+  public static final String OZONE_SCM_NAMES = "ozone.scm.names";
+
+  public static final int OZONE_SCM_DEFAULT_PORT =
+      OZONE_SCM_DATANODE_PORT_DEFAULT;
+  // File name and path where the datanode ID is written to.
+  // If this value is not set then container startup will fail.
+  public static final String OZONE_SCM_DATANODE_ID = "ozone.scm.datanode.id";
+
+  public static final String OZONE_SCM_DATANODE_ID_PATH_DEFAULT = "datanode.id";
+
+  public static final String OZONE_SCM_DB_CACHE_SIZE_MB =
+      "ozone.scm.db.cache.size.mb";
+  public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
+
+  public static final String OZONE_SCM_CONTAINER_SIZE_GB =
+      "ozone.scm.container.size.gb";
+  public static final int OZONE_SCM_CONTAINER_SIZE_DEFAULT = 5;
+
+  public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
+      "ozone.scm.container.placement.impl";
+
+  public static final String OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE =
+      "ozone.scm.container.provision_batch_size";
+  public static final int OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE_DEFAULT = 20;
+
+  public static final String OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY =
+      "ozone.scm.container.deletion-choosing.policy";
+
+  public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT =
+      "ozone.scm.container.creation.lease.timeout";
+
+  public static final String
+      OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
+
+  /**
+   * Don't start processing a pool until a minimum number of seconds has
+   * elapsed since the last processing.
+   */
+  public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
+      "ozone.scm.container.report.processing.interval";
+  public static final String
+      OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
+
+  /**
+   * This determines the total number of pools to be processed in parallel.
+   */
+  public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
+      "ozone.scm.max.nodepool.processing.threads";
+  public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
+  /**
+   * These two settings control the number of threads in the executor pool and
+   * the timeouts for container reports from all nodes.
+   */
+  public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
+      "ozone.scm.max.container.report.threads";
+  public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
+  public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
+      "ozone.scm.container.reports.wait.timeout";
+  public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
+      "5m";
+
+  public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
+      "ozone.scm.block.deletion.max.retry";
+  public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096;
+
+  // Once a container's usage crosses this threshold, it is eligible for
+  // closing.
+  public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD =
+      "ozone.scm.container.close.threshold";
+  public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
+  /**
+   * Never constructed.
+   */
+  private ScmConfigKeys() {
+
+  }
+}
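+
+// An illustrative usage sketch (the helper class and method names are
+// hypothetical): the keys above are plain string constants, so they are read
+// through the normal Hadoop Configuration API together with their documented
+// defaults.
+final class ScmConfigKeysUsageSketch {
+  static int scmHandlerCount(org.apache.hadoop.conf.Configuration conf) {
+    // Falls back to the compiled-in default when the key is not configured.
+    return conf.getInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY,
+        ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT);
+  }
+}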
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
new file mode 100644
index 0000000..6236feb
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+/**
+ * ScmInfo wraps the result returned from SCM#getScmInfo which
+ * contains clusterId and the SCM Id.
+ */
+public final class ScmInfo {
+  private String clusterId;
+  private String scmId;
+
+  /**
+   * Builder for ScmInfo.
+   */
+  public static class Builder {
+    private String clusterId;
+    private String scmId;
+
+    /**
+     * Sets the cluster id.
+     * @param cid clusterId to be set
+     * @return Builder for ScmInfo
+     */
+    public Builder setClusterId(String cid) {
+      this.clusterId = cid;
+      return this;
+    }
+
+    /**
+     * Sets the scmId.
+     * @param id scmId
+     * @return Builder for ScmInfo
+     */
+    public Builder setScmId(String id) {
+      this.scmId = id;
+      return this;
+    }
+
+    public ScmInfo build() {
+      return new ScmInfo(clusterId, scmId);
+    }
+  }
+
+  private ScmInfo(String clusterId, String scmId) {
+    this.clusterId = clusterId;
+    this.scmId = scmId;
+  }
+
+  /**
+   * Gets the clusterId from the Version file.
+   * @return ClusterId
+   */
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  /**
+   * Gets the SCM Id from the Version file.
+   * @return SCM Id
+   */
+  public String getScmId() {
+    return scmId;
+  }
+}
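+
+// An illustrative sketch of the builder above; the helper name and the id
+// values are hypothetical placeholders.
+final class ScmInfoUsageSketch {
+  static ScmInfo of(String clusterId, String scmId) {
+    // In practice both values come from the SCM Version file.
+    return new ScmInfo.Builder()
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .build();
+  }
+}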
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
new file mode 100644
index 0000000..c96f79b
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A Client for the storageContainer protocol.
+ */
+public abstract class XceiverClientSpi implements Closeable {
+
+  private final AtomicInteger referenceCount;
+  private boolean isEvicted;
+
+  XceiverClientSpi() {
+    this.referenceCount = new AtomicInteger(0);
+    this.isEvicted = false;
+  }
+
+  void incrementReference() {
+    this.referenceCount.incrementAndGet();
+  }
+
+  void decrementReference() {
+    this.referenceCount.decrementAndGet();
+    cleanup();
+  }
+
+  void setEvicted() {
+    isEvicted = true;
+    cleanup();
+  }
+
+  // Close the xceiverClient only if:
+  // 1) there are no outstanding references to the client, and
+  // 2) it has been evicted from the cache.
+  private void cleanup() {
+    if (referenceCount.get() == 0 && isEvicted) {
+      close();
+    }
+  }
+
+  @VisibleForTesting
+  public int getRefcount() {
+    return referenceCount.get();
+  }
+
+  /**
+   * Connects to the leader in the pipeline.
+   */
+  public abstract void connect() throws Exception;
+
+  @Override
+  public abstract void close();
+
+  /**
+   * Returns the pipeline of machines that host the container used by this
+   * client.
+   *
+   * @return pipeline of machines that host the container
+   */
+  public abstract Pipeline getPipeline();
+
+  /**
+   * Sends a given command to the server and gets the reply back.
+   * @param request Request
+   * @return Response to the command
+   * @throws IOException
+   */
+  public abstract ContainerCommandResponseProto sendCommand(
+      ContainerCommandRequestProto request) throws IOException;
+
+  /**
+   * Sends a given command to the server and gets a waitable future back.
+   *
+   * @param request Request
+   * @return Response to the command
+   * @throws IOException
+   */
+  public abstract CompletableFuture<ContainerCommandResponseProto>
+      sendCommandAsync(ContainerCommandRequestProto request)
+      throws IOException, ExecutionException, InterruptedException;
+
+  /**
+   * Create a pipeline.
+   *
+   * @param pipelineID - Name of the pipeline.
+   * @param datanodes - Datanodes
+   */
+  public abstract void createPipeline(String pipelineID,
+      List<DatanodeDetails> datanodes) throws IOException;
+
+  /**
+   * Returns pipeline Type.
+   *
+   * @return - {Stand_Alone, Ratis or Chained}
+   */
+  public abstract HddsProtos.ReplicationType getPipelineType();
+}
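+
+// An illustrative sketch of the reference-counting contract above (the helper
+// name is hypothetical): close() only runs once the client has been evicted
+// from the cache AND its reference count has dropped back to zero.
+final class XceiverClientRefCountSketch {
+  static void acquireAndRelease(XceiverClientSpi client) {
+    client.incrementReference();
+    client.setEvicted();          // eviction alone does not close the client
+    client.decrementReference();  // refcount hits 0 -> cleanup() calls close()
+  }
+}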
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
new file mode 100644
index 0000000..0d4a299
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.client;
+
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+
+/**
+ * The interface to call into the underlying container layer.
+ *
+ * Written as an interface to allow easy testing: implement a mock container
+ * layer for standalone testing of the CBlock API without actually calling into
+ * remote containers. The actual container layer can simply re-implement this.
+ *
+ * NOTE: this class is only needed temporarily. When SCM containers are
+ * full-fledged, this interface will likely be removed.
+ */
+@InterfaceStability.Unstable
+public interface ScmClient {
+  /**
+   * Creates a Container on SCM and returns the pipeline.
+   * @param containerId - String container ID
+   * @return Pipeline
+   * @throws IOException
+   */
+  Pipeline createContainer(String containerId, String owner) throws IOException;
+
+  /**
+   * Gets a container by Name -- Throws if the container does not exist.
+   * @param containerId - String Container ID
+   * @return Pipeline
+   * @throws IOException
+   */
+  Pipeline getContainer(String containerId) throws IOException;
+
+  /**
+   * Close a container by name.
+   *
+   * @param pipeline the container to be closed.
+   * @throws IOException
+   */
+  void closeContainer(Pipeline pipeline) throws IOException;
+
+  /**
+   * Deletes an existing container.
+   * @param pipeline - Pipeline that represents the container.
+   * @param force - true to forcibly delete the container.
+   * @throws IOException
+   */
+  void deleteContainer(Pipeline pipeline, boolean force) throws IOException;
+
+  /**
+   * Lists a range of containers and gets their info.
+   *
+   * @param startName start name, if null, start searching at the head.
+   * @param prefixName prefix name, if null, then the filter is disabled.
+   * @param count count, if count < 0, the max size is unlimited. (Usually the
+   *              count will be replaced with a very large value instead of
+   *              being unlimited, in case the db is very big.)
+   *
+   * @return a list of container info.
+   * @throws IOException
+   */
+  List<ContainerInfo> listContainer(String startName, String prefixName,
+      int count) throws IOException;
+
+  /**
+   * Read meta data from an existing container.
+   * @param pipeline - Pipeline that represents the container.
+   * @return ContainerInfo
+   * @throws IOException
+   */
+  ContainerData readContainer(Pipeline pipeline) throws IOException;
+
+
+  /**
+   * Gets the container size -- Computed by SCM from Container Reports.
+   * @param pipeline - Pipeline
+   * @return number of bytes used by this container.
+   * @throws IOException
+   */
+  long getContainerSize(Pipeline pipeline) throws IOException;
+
+  /**
+   * Creates a Container on SCM and returns the pipeline.
+   * @param type - Replication Type.
+   * @param replicationFactor - Replication Factor
+   * @param containerId - Container ID
+   * @return Pipeline
+   * @throws IOException - in case of error.
+   */
+  Pipeline createContainer(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor replicationFactor, String containerId,
+      String owner) throws IOException;
+
+  /**
+   * Returns a set of Nodes that meet a query criteria.
+   * @param nodeStatuses - A set of criteria that we want the node to have.
+   * @param queryScope - Query scope - Cluster or pool.
+   * @param poolName - if it is pool, a pool name is required.
+   * @return A set of nodes that meet the requested criteria.
+   * @throws IOException
+   */
+  HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
+      HddsProtos.QueryScope queryScope, String poolName) throws IOException;
+
+  /**
+   * Creates a specified replication pipeline.
+   * @param type - Type
+   * @param factor - Replication factor
+   * @param nodePool - Set of machines.
+   * @throws IOException
+   */
+  Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
+      throws IOException;
+}
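+
+// An illustrative sketch of driving the interface above; the helper name, the
+// container name and the owner are hypothetical placeholders.
+final class ScmClientUsageSketch {
+  static void createAndClose(ScmClient client) throws IOException {
+    Pipeline pipeline = client.createContainer("exampleContainer",
+        "exampleOwner");
+    client.closeContainer(pipeline);
+  }
+}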
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
new file mode 100644
index 0000000..e2f7033
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains classes for the client of the storage container
+ * protocol.
+ */
+package org.apache.hadoop.hdds.scm.client;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
new file mode 100644
index 0000000..9520c8c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.math3.util.MathUtils;
+
+/**
+ * Container ID is a value in the range 1..MAX_CONTAINER_ID.
+ * <p>
+ * We are creating a specific type for this to avoid mixing this with
+ * normal integers in code.
+ */
+public class ContainerID implements Comparable {
+
+  private final long id;
+
+  /**
+   * Constructs ContainerID.
+   *
+   * @param id long
+   */
+  public ContainerID(long id) {
+    Preconditions.checkState(id > 0,
+        "Container ID should be a positive int");
+    this.id = id;
+  }
+
+  /**
+   * Factory method for creation of ContainerID.
+   * @param containerID  long
+   * @return ContainerID.
+   */
+  public static ContainerID valueof(long containerID) {
+    Preconditions.checkState(containerID > 0);
+    return new ContainerID(containerID);
+  }
+
+  /**
+   * Returns the long value of the ID.
+   *
+   * @return long
+   */
+  public long getId() {
+    return id;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    ContainerID that = (ContainerID) o;
+
+    return id == that.id;
+  }
+
+  @Override
+  public int hashCode() {
+    return MathUtils.hash(id);
+  }
+
+  @Override
+  public int compareTo(Object o) {
+    Preconditions.checkNotNull(o);
+    if (o instanceof ContainerID) {
+      return Long.compare(((ContainerID) o).getId(), this.getId());
+    }
+    throw new IllegalArgumentException("Object o should be an instance " +
+        "of ContainerID");
+  }
+
+  @Override
+  public String toString() {
+    return "id=" + id;
+  }
+}
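+
+// An illustrative sketch (hypothetical helper name): two ContainerID instances
+// wrapping the same positive long compare equal, so the type can safely be
+// used as a map key.
+final class ContainerIDUsageSketch {
+  static boolean sameContainer(long id) {
+    return ContainerID.valueof(id).equals(new ContainerID(id));
+  }
+}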
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
new file mode 100644
index 0000000..d253b15
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+/**
+ * Allocated block wraps the result returned from SCM#allocateBlock which
+ * contains a Pipeline and the key.
+ */
+public final class AllocatedBlock {
+  private Pipeline pipeline;
+  private String key;
+  // Indicates whether the client should create the container before
+  // writing the block.
+  private boolean shouldCreateContainer;
+
+  /**
+   * Builder for AllocatedBlock.
+   */
+  public static class Builder {
+    private Pipeline pipeline;
+    private String key;
+    private boolean shouldCreateContainer;
+
+    public Builder setPipeline(Pipeline p) {
+      this.pipeline = p;
+      return this;
+    }
+
+    public Builder setKey(String k) {
+      this.key = k;
+      return this;
+    }
+
+    public Builder setShouldCreateContainer(boolean shouldCreate) {
+      this.shouldCreateContainer = shouldCreate;
+      return this;
+    }
+
+    public AllocatedBlock build() {
+      return new AllocatedBlock(pipeline, key, shouldCreateContainer);
+    }
+  }
+
+  private AllocatedBlock(Pipeline pipeline, String key,
+      boolean shouldCreateContainer) {
+    this.pipeline = pipeline;
+    this.key = key;
+    this.shouldCreateContainer = shouldCreateContainer;
+  }
+
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  public String getKey() {
+    return key;
+  }
+
+  public boolean getCreateContainer() {
+    return shouldCreateContainer;
+  }
+}
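+
+// An illustrative sketch of the builder above; the helper name is
+// hypothetical, and the pipeline and key are values supplied by the caller.
+final class AllocatedBlockUsageSketch {
+  static AllocatedBlock of(Pipeline pipeline, String key) {
+    return new AllocatedBlock.Builder()
+        .setPipeline(pipeline)
+        .setKey(key)
+        .setShouldCreateContainer(true)
+        .build();
+  }
+}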
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
new file mode 100644
index 0000000..823a7fb
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.util.Time;
+
+import java.util.Comparator;
+
+/**
+ * Class wraps ozone container info.
+ */
+public class ContainerInfo
+    implements Comparator<ContainerInfo>, Comparable<ContainerInfo> {
+  private HddsProtos.LifeCycleState state;
+  private Pipeline pipeline;
+  // Bytes allocated by SCM for clients.
+  private long allocatedBytes;
+  // Actual container usage, updated through heartbeat.
+  private long usedBytes;
+  private long numberOfKeys;
+  private long lastUsed;
+  // The wall-clock time, in ms since the epoch, at which the container
+  // entered its current state.
+  private long stateEnterTime;
+  private String owner;
+  private String containerName;
+  private long containerID;
+
+  ContainerInfo(
+      long containerID,
+      final String containerName,
+      HddsProtos.LifeCycleState state,
+      Pipeline pipeline,
+      long allocatedBytes,
+      long usedBytes,
+      long numberOfKeys,
+      long stateEnterTime,
+      String owner) {
+    this.containerID = containerID;
+    this.containerName = containerName;
+    this.pipeline = pipeline;
+    this.allocatedBytes = allocatedBytes;
+    this.usedBytes = usedBytes;
+    this.numberOfKeys = numberOfKeys;
+    this.lastUsed = Time.monotonicNow();
+    this.state = state;
+    this.stateEnterTime = stateEnterTime;
+    this.owner = owner;
+  }
+
+  /**
+   * Needed for serialization (findbugs).
+   */
+  public ContainerInfo() {
+  }
+
+  public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
+    ContainerInfo.Builder builder = new ContainerInfo.Builder();
+    builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline()));
+    builder.setAllocatedBytes(info.getAllocatedBytes());
+    builder.setUsedBytes(info.getUsedBytes());
+    builder.setNumberOfKeys(info.getNumberOfKeys());
+    builder.setState(info.getState());
+    builder.setStateEnterTime(info.getStateEnterTime());
+    builder.setOwner(info.getOwner());
+    builder.setContainerName(info.getContainerName());
+    builder.setContainerID(info.getContainerID());
+    return builder.build();
+  }
+
+  public long getContainerID() {
+    return containerID;
+  }
+
+  public String getContainerName() {
+    return containerName;
+  }
+
+  public HddsProtos.LifeCycleState getState() {
+    return state;
+  }
+
+  public void setState(HddsProtos.LifeCycleState state) {
+    this.state = state;
+  }
+
+  public long getStateEnterTime() {
+    return stateEnterTime;
+  }
+
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  public long getAllocatedBytes() {
+    return allocatedBytes;
+  }
+
+  /**
+   * Updates the allocated bytes.
+   *
+   * @param size - newly allocated bytes; a negative size can be used in the
+   * case of deletes.
+   */
+  public void updateAllocatedBytes(long size) {
+    this.allocatedBytes += size;
+  }
+
+  public long getUsedBytes() {
+    return usedBytes;
+  }
+
+  public long getNumberOfKeys() {
+    return numberOfKeys;
+  }
+
+  public ContainerID containerID() {
+    return new ContainerID(getContainerID());
+  }
+
+  /**
+   * Gets the last used time from SCM's perspective.
+   *
+   * @return time in milliseconds.
+   */
+  public long getLastUsed() {
+    return lastUsed;
+  }
+
+  public void updateLastUsedTime() {
+    lastUsed = Time.monotonicNow();
+  }
+
+  public void allocate(long size) {
+    // should we also have total container size in ContainerInfo
+    // and check before allocating?
+    allocatedBytes += size;
+  }
+
+  public HddsProtos.SCMContainerInfo getProtobuf() {
+    HddsProtos.SCMContainerInfo.Builder builder =
+        HddsProtos.SCMContainerInfo.newBuilder();
+    builder.setPipeline(getPipeline().getProtobufMessage());
+    builder.setAllocatedBytes(getAllocatedBytes());
+    builder.setUsedBytes(getUsedBytes());
+    builder.setNumberOfKeys(getNumberOfKeys());
+    builder.setState(state);
+    builder.setStateEnterTime(stateEnterTime);
+    builder.setContainerID(getContainerID());
+
+    if (getOwner() != null) {
+      builder.setOwner(getOwner());
+    }
+    builder.setContainerName(getContainerName());
+    return builder.build();
+  }
+
+  public String getOwner() {
+    return owner;
+  }
+
+  public void setOwner(String owner) {
+    this.owner = owner;
+  }
+
+  @Override
+  public String toString() {
+    return "ContainerInfo{"
+        + "state=" + state
+        + ", pipeline=" + pipeline
+        + ", stateEnterTime=" + stateEnterTime
+        + ", owner=" + owner
+        + ", containerName='" + containerName
+        + '}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    ContainerInfo that = (ContainerInfo) o;
+
+    return new EqualsBuilder()
+        .append(pipeline.getContainerName(), that.pipeline.getContainerName())
+
+        // TODO : Fix this later. If we add these factors some tests fail,
+        // so they are commented out for now. This will be enforced once the
+        // container name is moved from Pipeline to SCMContainerInfo.
+        // .append(pipeline.getFactor(), that.pipeline.getFactor())
+        // .append(pipeline.getType(), that.pipeline.getType())
+        .append(owner, that.owner)
+        .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(11, 811)
+        .append(pipeline.getContainerName())
+        .append(pipeline.getFactor())
+        .append(pipeline.getType())
+        .append(owner)
+        .toHashCode();
+  }
+
+  /**
+   * Compares its two arguments for order.  Returns a negative integer, zero, or
+   * a positive integer as the first argument is less than, equal to, or greater
+   * than the second.<p>
+   *
+   * @param o1 the first object to be compared.
+   * @param o2 the second object to be compared.
+   * @return a negative integer, zero, or a positive integer as the first
+   * argument is less than, equal to, or greater than the second.
+   * @throws NullPointerException if an argument is null and this comparator
+   *                              does not permit null arguments
+   * @throws ClassCastException   if the arguments' types prevent them from
+   *                              being compared by this comparator.
+   */
+  @Override
+  public int compare(ContainerInfo o1, ContainerInfo o2) {
+    return Long.compare(o1.getLastUsed(), o2.getLastUsed());
+  }
+
+  /**
+   * Compares this object with the specified object for order.  Returns a
+   * negative integer, zero, or a positive integer as this object is less than,
+   * equal to, or greater than the specified object.
+   *
+   * @param o the object to be compared.
+   * @return a negative integer, zero, or a positive integer as this object is
+   * less than, equal to, or greater than the specified object.
+   * @throws NullPointerException if the specified object is null
+   * @throws ClassCastException   if the specified object's type prevents it
+   *                              from being compared to this object.
+   */
+  @Override
+  public int compareTo(ContainerInfo o) {
+    return this.compare(this, o);
+  }
+
+  /**
+   * Builder class for ContainerInfo.
+   */
+  public static class Builder {
+    private HddsProtos.LifeCycleState state;
+    private Pipeline pipeline;
+    private long allocated;
+    private long used;
+    private long keys;
+    private long stateEnterTime;
+    private String owner;
+    private String containerName;
+    private long containerID;
+
+    public Builder setContainerID(long id) {
+      Preconditions.checkState(id >= 0);
+      this.containerID = id;
+      return this;
+    }
+
+    public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
+      this.state = lifeCycleState;
+      return this;
+    }
+
+    public Builder setPipeline(Pipeline containerPipeline) {
+      this.pipeline = containerPipeline;
+      return this;
+    }
+
+    public Builder setAllocatedBytes(long bytesAllocated) {
+      this.allocated = bytesAllocated;
+      return this;
+    }
+
+    public Builder setUsedBytes(long bytesUsed) {
+      this.used = bytesUsed;
+      return this;
+    }
+
+    public Builder setNumberOfKeys(long keyCount) {
+      this.keys = keyCount;
+      return this;
+    }
+
+    public Builder setStateEnterTime(long time) {
+      this.stateEnterTime = time;
+      return this;
+    }
+
+    public Builder setOwner(String containerOwner) {
+      this.owner = containerOwner;
+      return this;
+    }
+
+    public Builder setContainerName(String container) {
+      this.containerName = container;
+      return this;
+    }
+
+    public ContainerInfo build() {
+      return new
+          ContainerInfo(containerID, containerName, state, pipeline,
+          allocated, used, keys, stateEnterTime, owner);
+    }
+  }
+}
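+
+// An illustrative sketch of the builder above; the helper name, container
+// name, owner, sizes and state are hypothetical values.
+final class ContainerInfoUsageSketch {
+  static ContainerInfo of(Pipeline pipeline) {
+    return new ContainerInfo.Builder()
+        .setContainerID(1)
+        .setContainerName("exampleContainer")
+        .setState(HddsProtos.LifeCycleState.OPEN)
+        .setPipeline(pipeline)
+        .setAllocatedBytes(0)
+        .setUsedBytes(0)
+        .setNumberOfKeys(0)
+        .setStateEnterTime(Time.monotonicNow())
+        .setOwner("exampleOwner")
+        .build();
+  }
+}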
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
new file mode 100644
index 0000000..fd97eae
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import static org.apache.hadoop.hdds.protocol.proto
+    .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
+
+/**
+ * Class wraps storage container manager block deletion results.
+ */
+public class DeleteBlockResult {
+  private String key;
+  private DeleteScmBlockResult.Result result;
+
+  public DeleteBlockResult(final String key,
+      final DeleteScmBlockResult.Result result) {
+    this.key = key;
+    this.result = result;
+  }
+
+  /**
+   * Gets the deleted key.
+   * @return key name.
+   */
+  public String getKey() {
+    return key;
+  }
+
+  /**
+   * Gets the key deletion result.
+   * @return key deletion result.
+   */
+  public DeleteScmBlockResult.Result getResult() {
+    return result;
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
new file mode 100644
index 0000000..32d0a2d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonFilter;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.PropertyAccessor;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.databind.ser.FilterProvider;
+import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
+import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A pipeline represents the group of machines over which a container lives.
+ */
+public class Pipeline {
+  static final String PIPELINE_INFO = "PIPELINE_INFO_FILTER";
+  private static final ObjectWriter WRITER;
+
+  static {
+    ObjectMapper mapper = new ObjectMapper();
+    String[] ignorableFieldNames = {"data"};
+    FilterProvider filters = new SimpleFilterProvider()
+        .addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter
+            .serializeAllExcept(ignorableFieldNames));
+    mapper.setVisibility(PropertyAccessor.FIELD,
+        JsonAutoDetect.Visibility.ANY);
+    mapper.addMixIn(Object.class, MixIn.class);
+
+    WRITER = mapper.writer(filters);
+  }
+
+  private String containerName;
+  private PipelineChannel pipelineChannel;
+  /**
+   * Allows private data to be attached to a pipeline. It is not serialized
+   * via protobuf; it only lets callers keep some private state alongside the
+   * pipeline.
+   */
+  @JsonIgnore
+  private byte[] data;
+
+  /**
+   * Constructs a new pipeline data structure.
+   *
+   * @param containerName - Container
+   * @param pipelineChannel - transport information for this container
+   */
+  public Pipeline(String containerName, PipelineChannel pipelineChannel) {
+    this.containerName = containerName;
+    this.pipelineChannel = pipelineChannel;
+    data = null;
+  }
+
+  /**
+   * Gets pipeline object from protobuf.
+   *
+   * @param pipeline - ProtoBuf definition for the pipeline.
+   * @return Pipeline Object
+   */
+  public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
+    Preconditions.checkNotNull(pipeline);
+    PipelineChannel pipelineChannel =
+        PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
+    return new Pipeline(pipeline.getContainerName(), pipelineChannel);
+  }
+
+  public HddsProtos.ReplicationFactor getFactor() {
+    return pipelineChannel.getFactor();
+  }
+
+  /**
+   * Returns the leader datanode of the pipeline.
+   *
+   * @return leader datanode.
+   */
+  @JsonIgnore
+  public DatanodeDetails getLeader() {
+    return pipelineChannel.getDatanodes().get(pipelineChannel.getLeaderID());
+  }
+
+  /**
+   * Returns the hostname of the leader datanode.
+   *
+   * @return leader hostname.
+   */
+  public String getLeaderHost() {
+    return pipelineChannel.getDatanodes()
+        .get(pipelineChannel.getLeaderID()).getHostName();
+  }
+
+  /**
+   * Returns all machines that make up this pipeline.
+   *
+   * @return List of Machines.
+   */
+  @JsonIgnore
+  public List<DatanodeDetails> getMachines() {
+    return new ArrayList<>(pipelineChannel.getDatanodes().values());
+  }
+
+  /**
+   * Returns the hostnames of all machines that make up this pipeline.
+   *
+   * @return List of hostnames.
+   */
+  public List<String> getDatanodeHosts() {
+    List<String> dataHosts = new ArrayList<>();
+    for (DatanodeDetails id : pipelineChannel.getDatanodes().values()) {
+      dataHosts.add(id.getHostName());
+    }
+    return dataHosts;
+  }
+
+  /**
+   * Return a Protobuf Pipeline message from pipeline.
+   *
+   * @return Protobuf message
+   */
+  @JsonIgnore
+  public HddsProtos.Pipeline getProtobufMessage() {
+    HddsProtos.Pipeline.Builder builder =
+        HddsProtos.Pipeline.newBuilder();
+    builder.setContainerName(this.containerName);
+    builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
+    return builder.build();
+  }
+
+  /**
+   * Returns containerName if available.
+   *
+   * @return String.
+   */
+  public String getContainerName() {
+    return containerName;
+  }
+
+  /**
+   * Returns private data that is set on this pipeline.
+   *
+   * @return blob, the user can interpret it any way they like.
+   */
+  public byte[] getData() {
+    if (this.data != null) {
+      return Arrays.copyOf(this.data, this.data.length);
+    } else {
+      return null;
+    }
+  }
+
+  @VisibleForTesting
+  public PipelineChannel getPipelineChannel() {
+    return pipelineChannel;
+  }
+
+  /**
+   * Set private data on pipeline.
+   *
+   * @param data -- private data.
+   */
+  public void setData(byte[] data) {
+    if (data != null) {
+      this.data = Arrays.copyOf(data, data.length);
+    }
+  }
+
+  /**
+   * Gets the State of the pipeline.
+   *
+   * @return - LifeCycleStates.
+   */
+  public HddsProtos.LifeCycleState getLifeCycleState() {
+    return pipelineChannel.getLifeCycleState();
+  }
+
+  /**
+   * Gets the pipeline Name.
+   *
+   * @return - Name of the pipeline
+   */
+  public String getPipelineName() {
+    return pipelineChannel.getName();
+  }
+
+  /**
+   * Returns the type.
+   *
+   * @return type - Standalone, Ratis, Chained.
+   */
+  public HddsProtos.ReplicationType getType() {
+    return pipelineChannel.getType();
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder b = new StringBuilder(getClass().getSimpleName())
+        .append("[");
+    pipelineChannel.getDatanodes().keySet().stream()
+        .forEach(id -> b.
+            append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id));
+    b.append("] container:").append(containerName);
+    b.append(" name:").append(getPipelineName());
+    if (getType() != null) {
+      b.append(" type:").append(getType().toString());
+    }
+    if (getFactor() != null) {
+      b.append(" factor:").append(getFactor().toString());
+    }
+    if (getLifeCycleState() != null) {
+      b.append(" State:").append(getLifeCycleState().toString());
+    }
+    return b.toString();
+  }
+
+  /**
+   * Returns a JSON string of this object.
+   *
+   * @return String - json string
+   * @throws IOException
+   */
+  public String toJsonString() throws IOException {
+    return WRITER.writeValueAsString(this);
+  }
+
+  @JsonFilter(PIPELINE_INFO)
+  class MixIn {
+  }
+}
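+
+// An illustrative sketch (hypothetical helper name): a Pipeline is just a
+// container name plus its PipelineChannel, and toJsonString() renders it
+// (minus the private data blob) for reporting.
+final class PipelineUsageSketch {
+  static String describe(String containerName, PipelineChannel channel)
+      throws IOException {
+    return new Pipeline(containerName, channel).toJsonString();
+  }
+}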
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
new file mode 100644
index 0000000..ebd52e9
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * PipelineChannel information for a {@link Pipeline}.
+ */
+public class PipelineChannel {
+  @JsonIgnore
+  private String leaderID;
+  @JsonIgnore
+  private Map<String, DatanodeDetails> datanodes;
+  private LifeCycleState lifeCycleState;
+  private ReplicationType type;
+  private ReplicationFactor factor;
+  private String name;
+
+  public PipelineChannel(String leaderID, LifeCycleState lifeCycleState,
+      ReplicationType replicationType, ReplicationFactor replicationFactor,
+      String name) {
+    this.leaderID = leaderID;
+    this.lifeCycleState = lifeCycleState;
+    this.type = replicationType;
+    this.factor = replicationFactor;
+    this.name = name;
+    datanodes = new TreeMap<>();
+  }
+
+  public String getLeaderID() {
+    return leaderID;
+  }
+
+  public Map<String, DatanodeDetails> getDatanodes() {
+    return datanodes;
+  }
+
+  public LifeCycleState getLifeCycleState() {
+    return lifeCycleState;
+  }
+
+  public ReplicationType getType() {
+    return type;
+  }
+
+  public ReplicationFactor getFactor() {
+    return factor;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void addMember(DatanodeDetails datanodeDetails) {
+    datanodes.put(datanodeDetails.getUuid().toString(),
+        datanodeDetails);
+  }
+
+  @JsonIgnore
+  public HddsProtos.PipelineChannel getProtobufMessage() {
+    HddsProtos.PipelineChannel.Builder builder =
+        HddsProtos.PipelineChannel.newBuilder();
+    for (DatanodeDetails datanode : datanodes.values()) {
+      builder.addMembers(datanode.getProtoBufMessage());
+    }
+    builder.setLeaderID(leaderID);
+
+    if (this.getLifeCycleState() != null) {
+      builder.setState(this.getLifeCycleState());
+    }
+    if (this.getType() != null) {
+      builder.setType(this.getType());
+    }
+
+    if (this.getFactor() != null) {
+      builder.setFactor(this.getFactor());
+    }
+    return builder.build();
+  }
+
+  public static PipelineChannel getFromProtoBuf(
+      HddsProtos.PipelineChannel transportProtos) {
+    Preconditions.checkNotNull(transportProtos);
+    PipelineChannel pipelineChannel =
+        new PipelineChannel(transportProtos.getLeaderID(),
+            transportProtos.getState(),
+            transportProtos.getType(),
+            transportProtos.getFactor(),
+            transportProtos.getName());
+
+    for (HddsProtos.DatanodeDetailsProto dataID :
+        transportProtos.getMembersList()) {
+      pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID));
+    }
+    return pipelineChannel;
+  }
+}
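+
+// An illustrative sketch (hypothetical helper name): a PipelineChannel carries
+// the leader id plus its member datanodes and is converted to and from its
+// protobuf form when it crosses the wire.
+final class PipelineChannelUsageSketch {
+  static PipelineChannel roundTrip(PipelineChannel channel) {
+    return PipelineChannel.getFromProtoBuf(channel.getProtobufMessage());
+  }
+}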
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
new file mode 100644
index 0000000..35d8444
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+
+import java.io.IOException;
+
+/**
+ * Exceptions thrown from the Storage Container.
+ */
+public class StorageContainerException extends IOException {
+  private ContainerProtos.Result result;
+
+  /**
+   * Constructs a {@code StorageContainerException} with {@code null}
+   * as its error detail message.
+   *
+   * @param result - The result code
+   */
+  public StorageContainerException(ContainerProtos.Result result) {
+    this.result = result;
+  }
+
+  /**
+   * Constructs a {@code StorageContainerException} with the specified
+   * detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   * @param result - The result code
+   */
+  public StorageContainerException(String message,
+      ContainerProtos.Result result) {
+    super(message);
+    this.result = result;
+  }
+
+  /**
+   * Constructs a {@code StorageContainerException} with the specified
+   * detail message and cause.
+   * <p>
+   * Note that the detail message associated with {@code cause} is
+   * <i>not</i> automatically incorporated into this exception's detail
+   * message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   *
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   *
+   * @param result - The result code
+   */
+  public StorageContainerException(String message, Throwable cause,
+      ContainerProtos.Result result) {
+    super(message, cause);
+    this.result = result;
+  }
+
+  /**
+   * Constructs a {@code StorageContainerException} with the specified cause
+   * and a detail message of {@code (cause==null ? null : cause.toString())}
+   * (which typically contains the class and detail message of {@code cause}).
+   * This constructor is useful for IO exceptions that are little more
+   * than wrappers for other throwables.
+   *
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @param result - The result code
+   */
+  public StorageContainerException(Throwable cause, ContainerProtos.Result
+      result) {
+    super(cause);
+    this.result = result;
+  }
+
+  /**
+   * Returns the result code associated with this exception.
+   *
+   * @return the result code
+   */
+  public ContainerProtos.Result getResult() {
+    return result;
+  }
+
+
+}
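
Since callers are expected to branch on the protobuf result code rather than parse message strings, a hedged sketch of the intended usage (the CONTAINER_NOT_FOUND constant is assumed for illustration; only the constructors and getResult() come from the class above):

  // Throwing side: attach a result code to the failure.
  static void checkContainerExists(String name, boolean exists)
      throws StorageContainerException {
    if (!exists) {
      throw new StorageContainerException(
          "Container " + name + " does not exist",
          ContainerProtos.Result.CONTAINER_NOT_FOUND);  // assumed constant
    }
  }

  // Catching side: the result code travels with the exception.
  static void handle() {
    try {
      checkContainerExists("c1", false);
    } catch (StorageContainerException e) {
      ContainerProtos.Result code = e.getResult();
      System.err.println("Container call failed, result = " + code);
    }
  }
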
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
new file mode 100644
index 0000000..ffe0d3d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+/**
+ * Contains protocol buffer helper classes and utilities used in the
+ * implementation.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
new file mode 100644
index 0000000..d13dcb1
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
new file mode 100644
index 0000000..3c544db
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+/**
+ * This package contains classes for the client of the storage container
+ * protocol.
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
new file mode 100644
index 0000000..14ee3d2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+import java.util.Set;
+
+/**
+ * Holds the nodes that currently host the container for an object key hash.
+ */
+@InterfaceAudience.Private
+public final class LocatedContainer {
+  private final String key;
+  private final String matchedKeyPrefix;
+  private final String containerName;
+  private final Set<DatanodeInfo> locations;
+  private final DatanodeInfo leader;
+
+  /**
+   * Creates a LocatedContainer.
+   *
+   * @param key object key
+   * @param matchedKeyPrefix prefix of key that was used to find the location
+   * @param containerName container name
+   * @param locations nodes that currently host the container
+   * @param leader node that currently acts as pipeline leader
+   */
+  public LocatedContainer(String key, String matchedKeyPrefix,
+      String containerName, Set<DatanodeInfo> locations, DatanodeInfo leader) {
+    this.key = key;
+    this.matchedKeyPrefix = matchedKeyPrefix;
+    this.containerName = containerName;
+    this.locations = locations;
+    this.leader = leader;
+  }
+
+  /**
+   * Returns the container name.
+   *
+   * @return container name
+   */
+  public String getContainerName() {
+    return this.containerName;
+  }
+
+  /**
+   * Returns the object key.
+   *
+   * @return object key
+   */
+  public String getKey() {
+    return this.key;
+  }
+
+  /**
+   * Returns the node that currently acts as pipeline leader.
+   *
+   * @return node that currently acts as pipeline leader
+   */
+  public DatanodeInfo getLeader() {
+    return this.leader;
+  }
+
+  /**
+   * Returns the nodes that currently host the container.
+   *
+   * @return Set<DatanodeInfo> nodes that currently host the container
+   */
+  public Set<DatanodeInfo> getLocations() {
+    return this.locations;
+  }
+
+  /**
+   * Returns the prefix of the key that was used to find the location.
+   *
+   * @return prefix of the key that was used to find the location
+   */
+  public String getMatchedKeyPrefix() {
+    return this.matchedKeyPrefix;
+  }
+
+  @Override
+  public boolean equals(Object otherObj) {
+    if (otherObj == null) {
+      return false;
+    }
+    if (!(otherObj instanceof LocatedContainer)) {
+      return false;
+    }
+    LocatedContainer other = (LocatedContainer)otherObj;
+    return this.key == null ? other.key == null : this.key.equals(other.key);
+  }
+
+  @Override
+  public int hashCode() {
+    return key.hashCode();
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName()
+        + "{key=" + key
+        + "; matchedKeyPrefix=" + matchedKeyPrefix
+        + "; containerName=" + containerName
+        + "; locations=" + locations
+        + "; leader=" + leader
+        + "}";
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
new file mode 100644
index 0000000..f100fc7
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.protocol;
+
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
+ * to read/write a block.
+ */
+public interface ScmBlockLocationProtocol {
+
+  /**
+   * Find the set of nodes to read/write a block, as
+   * identified by the block key.  This method supports batch lookup by
+   * passing multiple keys.
+   *
+   * @param keys batch of block keys to find
+   * @return allocated blocks for each block key
+   * @throws IOException if there is any failure
+   */
+  Set<AllocatedBlock> getBlockLocations(Set<String> keys) throws IOException;
+
+  /**
+   * Asks SCM where a block should be allocated. SCM responds with the
+   * set of datanodes that should be used for creating this block.
+   * @param size - size of the block.
+   * @return allocated block accessing info (key, pipeline).
+   * @throws IOException
+   */
+  AllocatedBlock allocateBlock(long size, ReplicationType type,
+      ReplicationFactor factor, String owner) throws IOException;
+
+  /**
+   * Delete blocks for a set of object keys.
+   *
+   * @param keyBlocksInfoList list of object keys and their blocks.
+   * @return list of block deletion results.
+   * @throws IOException if there is any failure.
+   */
+  List<DeleteBlockGroupResult>
+      deleteKeyBlocks(List<BlockGroup> keyBlocksInfoList) throws IOException;
+
+  /**
+   * Gets the cluster ID and SCM ID from SCM.
+   */
+  ScmInfo getScmInfo() throws IOException;
+}
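
As a concrete, hedged example of the contract above, a caller that allocates one block might look like the following (RATIS and THREE are assumed enum constants; the rest follows the interface signatures):

  // Asks SCM for a new block of the given size on behalf of an owner.
  static AllocatedBlock allocateOneBlock(ScmBlockLocationProtocol scm,
      long sizeBytes, String owner) throws IOException {
    return scm.allocateBlock(sizeBytes,
        ReplicationType.RATIS,      // assumed constant
        ReplicationFactor.THREE,    // assumed constant
        owner);
  }

The returned AllocatedBlock carries the block key and the pipeline chosen by SCM (see the Builder usage in the client-side translator below), which the caller then uses with the container protocol.
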
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
new file mode 100644
index 0000000..6cbdee4
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Holds the nodes that currently host the block for a block key.
+ */
+@InterfaceAudience.Private
+public final class ScmLocatedBlock {
+  private final String key;
+  private final List<DatanodeInfo> locations;
+  private final DatanodeInfo leader;
+
+  /**
+   * Creates a ScmLocatedBlock.
+   *
+   * @param key object key
+   * @param locations nodes that currently host the block
+   * @param leader node that currently acts as pipeline leader
+   */
+  public ScmLocatedBlock(final String key, final List<DatanodeInfo> locations,
+      final DatanodeInfo leader) {
+    this.key = key;
+    this.locations = locations;
+    this.leader = leader;
+  }
+
+  /**
+   * Returns the object key.
+   *
+   * @return object key
+   */
+  public String getKey() {
+    return this.key;
+  }
+
+  /**
+   * Returns the node that currently acts as pipeline leader.
+   *
+   * @return node that currently acts as pipeline leader
+   */
+  public DatanodeInfo getLeader() {
+    return this.leader;
+  }
+
+  /**
+   * Returns the nodes that currently host the block.
+   *
+   * @return List<DatanodeInfo> nodes that currently host the block
+   */
+  public List<DatanodeInfo> getLocations() {
+    return this.locations;
+  }
+
+  @Override
+  public boolean equals(Object otherObj) {
+    if (otherObj == null) {
+      return false;
+    }
+    if (!(otherObj instanceof ScmLocatedBlock)) {
+      return false;
+    }
+    ScmLocatedBlock other = (ScmLocatedBlock)otherObj;
+    return this.key == null ? other.key == null : this.key.equals(other.key);
+  }
+
+  @Override
+  public int hashCode() {
+    return key.hashCode();
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + "{key=" + key + "; locations="
+        + locations.stream().map(loc -> loc.toString()).collect(Collectors
+            .joining(",")) + "; leader=" + leader + "}";
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
new file mode 100644
index 0000000..a60fbb2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.protocol;
+
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+
+/**
+ * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
+ * that currently host a container.
+ */
+public interface StorageContainerLocationProtocol {
+  /**
+   * Asks SCM where a container should be allocated. SCM responds with the
+   * set of datanodes that should be used for creating this container.
+   */
+  Pipeline allocateContainer(HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor factor, String containerName, String owner)
+      throws IOException;
+
+  /**
+   * Ask SCM the location of the container. SCM responds with a group of
+   * nodes where this container and its replicas are located.
+   *
+   * @param containerName - Name of the container.
+   * @return Pipeline - the pipeline where the container is located.
+   * @throws IOException
+   */
+  Pipeline getContainer(String containerName) throws IOException;
+
+  /**
+   * Asks SCM for a list of containers within a range of container names,
+   * limited by count. The search starts after the start name (exclusive)
+   * and uses the prefix name to filter the results; the size of the
+   * searched range cannot exceed the value of count.
+   *
+   * @param startName start name; if null, searching starts at the head.
+   * @param prefixName prefix name; if null, the filter is disabled.
+   * @param count maximum number of results; if count < 0, the size is
+   *              unlimited. (Usually count will be replaced with a very
+   *              large value instead of being unlimited in case the db
+   *              is very big.)
+   *
+   * @return a list of containers.
+   * @throws IOException
+   */
+  List<ContainerInfo> listContainer(String startName, String prefixName,
+      int count) throws IOException;
+
+  /**
+   * Deletes a container in SCM.
+   *
+   * @param containerName
+   * @throws IOException
+   *   if the container mapping cannot be deleted from the db store
+   *   or the container doesn't exist.
+   */
+  void deleteContainer(String containerName) throws IOException;
+
+  /**
+   * Queries a list of node statuses.
+   *
+   * @param nodeStatuses node states to query for
+   * @return NodePool of matching datanodes.
+   */
+  HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
+      HddsProtos.QueryScope queryScope, String poolName) throws IOException;
+
+  /**
+   * Notification from the client when it begins or finishes creating objects
+   * such as pipelines or containers on datanodes.
+   * The container will be in the Operational state after creation completes.
+   * @param type object type
+   * @param name object name
+   * @param op operation type (e.g., create, close, delete)
+   * @param stage creation stage
+   */
+  void notifyObjectStageChange(
+      ObjectStageChangeRequestProto.Type type, String name,
+      ObjectStageChangeRequestProto.Op op,
+      ObjectStageChangeRequestProto.Stage stage) throws IOException;
+
+  /**
+   * Creates a replication pipeline of a specified type.
+   * @param type - replication type
+   * @param factor - factor 1 or 3
+   * @param nodePool - optional machine list to build a pipeline.
+   * @throws IOException
+   */
+  Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool)
+      throws IOException;
+
+  /**
+   * Returns information about SCM.
+   *
+   * @return {@link ScmInfo}
+   * @throws IOException
+   */
+  ScmInfo getScmInfo() throws IOException;
+}
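
To make the container-location contract above concrete, a hedged end-to-end sketch (replication constants are assumed; container and owner names are hypothetical; everything else follows the method signatures in this interface):

  // Allocate, look up, list and finally delete a container.
  static void containerLifecycle(StorageContainerLocationProtocol scm)
      throws IOException {
    Pipeline pipeline = scm.allocateContainer(
        HddsProtos.ReplicationType.RATIS,      // assumed constant
        HddsProtos.ReplicationFactor.THREE,    // assumed constant
        "demo-container", "demo-owner");

    // The same pipeline can be fetched again by container name.
    Pipeline located = scm.getContainer("demo-container");

    // Page through at most 100 containers whose names start with "demo".
    List<ContainerInfo> page = scm.listContainer(null, "demo", 100);

    // Remove the container mapping from SCM when it is no longer needed.
    scm.deleteContainer("demo-container");
  }
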
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
new file mode 100644
index 0000000..b56a749
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.protocol;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..0012f3e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .AllocateScmBlockRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .AllocateScmBlockResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmKeyBlocksRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmKeyBlocksResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .GetScmBlockLocationsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .GetScmBlockLocationsResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .KeyBlocks;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .ScmLocatedBlockProto;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * This class is the client-side translator to translate the requests made on
+ * the {@link ScmBlockLocationProtocol} interface to the RPC server
+ * implementing {@link ScmBlockLocationProtocolPB}.
+ */
+@InterfaceAudience.Private
+public final class ScmBlockLocationProtocolClientSideTranslatorPB
+    implements ScmBlockLocationProtocol, ProtocolTranslator, Closeable {
+
+  /**
+   * RpcController is not used and hence is set to null.
+   */
+  private static final RpcController NULL_RPC_CONTROLLER = null;
+
+  private final ScmBlockLocationProtocolPB rpcProxy;
+
+  /**
+   * Creates a new ScmBlockLocationProtocolClientSideTranslatorPB.
+   *
+   * @param rpcProxy {@link ScmBlockLocationProtocolPB} RPC proxy
+   */
+  public ScmBlockLocationProtocolClientSideTranslatorPB(
+      ScmBlockLocationProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
+
+  /**
+   * Find the set of nodes to read/write a block, as
+   * identified by the block key.  This method supports batch lookup by
+   * passing multiple keys.
+   *
+   * @param keys batch of block keys to find
+   * @return allocated blocks for each block key
+   * @throws IOException if there is any failure
+   */
+  @Override
+  public Set<AllocatedBlock> getBlockLocations(Set<String> keys)
+      throws IOException {
+    GetScmBlockLocationsRequestProto.Builder req =
+        GetScmBlockLocationsRequestProto.newBuilder();
+    for (String key : keys) {
+      req.addKeys(key);
+    }
+    final GetScmBlockLocationsResponseProto resp;
+    try {
+      resp = rpcProxy.getScmBlockLocations(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    Set<AllocatedBlock> locatedBlocks =
+        Sets.newLinkedHashSetWithExpectedSize(resp.getLocatedBlocksCount());
+    for (ScmLocatedBlockProto locatedBlock : resp.getLocatedBlocksList()) {
+      locatedBlocks.add(new AllocatedBlock.Builder()
+          .setKey(locatedBlock.getKey())
+          .setPipeline(Pipeline.getFromProtoBuf(locatedBlock.getPipeline()))
+          .build());
+    }
+    return locatedBlocks;
+  }
+
+  /**
+   * Asks SCM where a block should be allocated. SCM responds with the
+   * set of datanodes that should be used for creating this block.
+   * @param size - size of the block.
+   * @return allocated block accessing info (key, pipeline).
+   * @throws IOException
+   */
+  @Override
+  public AllocatedBlock allocateBlock(long size,
+      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
+      String owner) throws IOException {
+    Preconditions.checkArgument(size > 0, "block size must be greater than 0");
+
+    AllocateScmBlockRequestProto request =
+        AllocateScmBlockRequestProto.newBuilder().setSize(size).setType(type)
+            .setFactor(factor).setOwner(owner).build();
+    final AllocateScmBlockResponseProto response;
+    try {
+      response = rpcProxy.allocateScmBlock(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (response.getErrorCode() !=
+        AllocateScmBlockResponseProto.Error.success) {
+      throw new IOException(response.hasErrorMessage() ?
+          response.getErrorMessage() : "Allocate block failed.");
+    }
+    AllocatedBlock.Builder builder = new AllocatedBlock.Builder()
+        .setKey(response.getKey())
+        .setPipeline(Pipeline.getFromProtoBuf(response.getPipeline()))
+        .setShouldCreateContainer(response.getCreateContainer());
+    return builder.build();
+  }
+
+  /**
+   * Deletes the blocks for the specified set of object keys.
+   *
+   * @param keyBlocksInfoList batch of block keys to delete.
+   * @return list of block deletion results.
+   * @throws IOException if there is any failure.
+   *
+   */
+  @Override
+  public List<DeleteBlockGroupResult> deleteKeyBlocks(
+      List<BlockGroup> keyBlocksInfoList) throws IOException {
+    List<KeyBlocks> keyBlocksProto = keyBlocksInfoList.stream()
+        .map(BlockGroup::getProto).collect(Collectors.toList());
+    DeleteScmKeyBlocksRequestProto request = DeleteScmKeyBlocksRequestProto
+        .newBuilder().addAllKeyBlocks(keyBlocksProto).build();
+
+    final DeleteScmKeyBlocksResponseProto resp;
+    try {
+      resp = rpcProxy.deleteScmKeyBlocks(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    List<DeleteBlockGroupResult> results =
+        new ArrayList<>(resp.getResultsCount());
+    results.addAll(resp.getResultsList().stream().map(
+        result -> new DeleteBlockGroupResult(result.getObjectKey(),
+            DeleteBlockGroupResult
+                .convertBlockResultProto(result.getBlockResultsList())))
+        .collect(Collectors.toList()));
+    return results;
+  }
+
+  /**
+   * Gets the cluster ID and SCM ID from SCM.
+   * @return ScmInfo
+   * @throws IOException
+   */
+  @Override
+  public ScmInfo getScmInfo() throws IOException {
+    HddsProtos.GetScmInfoRequestProto request =
+        HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
+    HddsProtos.GetScmInfoRespsonseProto resp;
+    try {
+      resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    ScmInfo.Builder builder = new ScmInfo.Builder()
+        .setClusterId(resp.getClusterId())
+        .setScmId(resp.getScmId());
+    return builder.build();
+  }
+
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+
+  @Override
+  public void close() {
+    RPC.stopProxy(rpcProxy);
+  }
+}
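
Because the translator above is a thin Closeable wrapper around the protobuf proxy, try-with-resources is a natural way to use it. A hedged sketch, assuming a ScmBlockLocationProtocolPB proxy has already been obtained through Hadoop RPC elsewhere:

  // Wraps an existing PB proxy, issues one call, and releases the proxy.
  static ScmInfo fetchScmInfo(ScmBlockLocationProtocolPB pbProxy)
      throws IOException {
    try (ScmBlockLocationProtocolClientSideTranslatorPB client =
        new ScmBlockLocationProtocolClientSideTranslatorPB(pbProxy)) {
      return client.getScmInfo();
    }
  }

close() stops the underlying RPC proxy via RPC.stopProxy, so the wrapper should not be reused after the try block.
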
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
new file mode 100644
index 0000000..837c95b
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .ScmBlockLocationProtocolService;
+import org.apache.hadoop.ipc.ProtocolInfo;
+
+/**
+ * Protocol used from an HDFS node to StorageContainerManager.  This extends the
+ * Protocol Buffers service interface to add Hadoop-specific annotations.
+ */
+@ProtocolInfo(protocolName =
+    "org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface ScmBlockLocationProtocolPB
+    extends ScmBlockLocationProtocolService.BlockingInterface {
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..3638f63
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ipc.RPC;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+
+/**
+ * This class is the client-side translator to translate the requests made on
+ * the {@link StorageContainerLocationProtocol} interface to the RPC server
+ * implementing {@link StorageContainerLocationProtocolPB}.
+ */
+@InterfaceAudience.Private
+public final class StorageContainerLocationProtocolClientSideTranslatorPB
+    implements StorageContainerLocationProtocol, ProtocolTranslator, Closeable {
+
+  /**
+   * RpcController is not used and hence is set to null.
+   */
+  private static final RpcController NULL_RPC_CONTROLLER = null;
+
+  private final StorageContainerLocationProtocolPB rpcProxy;
+
+  /**
+   * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
+   *
+   * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
+   */
+  public StorageContainerLocationProtocolClientSideTranslatorPB(
+      StorageContainerLocationProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
+
+  /**
+   * Asks SCM where a container should be allocated. SCM responds with the set
+   * of datanodes that should be used for creating this container. Ozone/SCM
+   * only supports a replication factor of either 1 or 3.
+   * @param type - Replication Type
+   * @param factor - Replication Count
+   * @param containerName - Name
+   * @param owner - Owner of the container
+   * @return Pipeline for the allocated container
+   * @throws IOException
+   */
+  @Override
+  public Pipeline allocateContainer(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor, String
+      containerName, String owner) throws IOException {
+
+    Preconditions.checkNotNull(containerName, "Container Name cannot be Null");
+    Preconditions.checkState(!containerName.isEmpty(), "Container name cannot" +
+        " be empty");
+    ContainerRequestProto request = ContainerRequestProto.newBuilder()
+        .setContainerName(containerName)
+        .setReplicationFactor(factor)
+        .setReplicationType(type)
+        .setOwner(owner)
+        .build();
+
+    final ContainerResponseProto response;
+    try {
+      response = rpcProxy.allocateContainer(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (response.getErrorCode() != ContainerResponseProto.Error.success) {
+      throw new IOException(response.hasErrorMessage() ?
+          response.getErrorMessage() : "Allocate container failed.");
+    }
+    return Pipeline.getFromProtoBuf(response.getPipeline());
+  }
+
+  @Override
+  public Pipeline getContainer(String containerName) throws IOException {
+    Preconditions.checkNotNull(containerName,
+        "Container Name cannot be Null");
+    Preconditions.checkState(!containerName.isEmpty(),
+        "Container name cannot be empty");
+    GetContainerRequestProto request = GetContainerRequestProto
+        .newBuilder()
+        .setContainerName(containerName)
+        .build();
+    try {
+      GetContainerResponseProto response =
+          rpcProxy.getContainer(NULL_RPC_CONTROLLER, request);
+      return Pipeline.getFromProtoBuf(response.getPipeline());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<ContainerInfo> listContainer(String startName, String prefixName,
+      int count) throws IOException {
+    SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto
+        .newBuilder();
+    if (prefixName != null) {
+      builder.setPrefixName(prefixName);
+    }
+    if (startName != null) {
+      builder.setStartName(startName);
+    }
+    builder.setCount(count);
+    SCMListContainerRequestProto request = builder.build();
+
+    try {
+      SCMListContainerResponseProto response =
+          rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
+      List<ContainerInfo> containerList = new ArrayList<>();
+      for (HddsProtos.SCMContainerInfo containerInfoProto : response
+          .getContainersList()) {
+        containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
+      }
+      return containerList;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  /**
+   * Ask SCM to delete a container by name. SCM will remove
+   * the container mapping in its database.
+   *
+   * @param containerName
+   * @throws IOException
+   */
+  @Override
+  public void deleteContainer(String containerName)
+      throws IOException {
+    Preconditions.checkState(!Strings.isNullOrEmpty(containerName),
+        "Container name cannot be null or empty");
+    SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto
+        .newBuilder()
+        .setContainerName(containerName)
+        .build();
+    try {
+      rpcProxy.deleteContainer(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  /**
+   * Queries a list of node statuses.
+   *
+   * @param nodeStatuses node states to query for
+   * @return NodePool of matching datanodes.
+   */
+  @Override
+  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
+      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
+      throws IOException {
+    // TODO: Only cluster-wide queries are supported right now, so queryScope
+    // and poolName are not checked.
+    Preconditions.checkNotNull(nodeStatuses);
+    Preconditions.checkState(nodeStatuses.size() > 0);
+    NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder()
+        .addAllQuery(nodeStatuses)
+        .setScope(queryScope).setPoolName(poolName).build();
+    try {
+      NodeQueryResponseProto response =
+          rpcProxy.queryNode(NULL_RPC_CONTROLLER, request);
+      return response.getDatanodes();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+  }
+
+  /**
+   * Notification from the client when it creates objects on datanodes.
+   * @param type object type
+   * @param name object name
+   * @param op operation type (e.g., create, close, delete)
+   * @param stage object creation stage : begin/complete
+   */
+  @Override
+  public void notifyObjectStageChange(
+      ObjectStageChangeRequestProto.Type type, String name,
+      ObjectStageChangeRequestProto.Op op,
+      ObjectStageChangeRequestProto.Stage stage) throws IOException {
+    Preconditions.checkState(!Strings.isNullOrEmpty(name),
+        "Object name cannot be null or empty");
+    ObjectStageChangeRequestProto request =
+        ObjectStageChangeRequestProto.newBuilder()
+            .setType(type)
+            .setName(name)
+            .setOp(op)
+            .setStage(stage)
+            .build();
+    try {
+      rpcProxy.notifyObjectStageChange(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  /**
+   * Creates a replication pipeline of a specified type.
+   *
+   * @param replicationType - replication type
+   * @param factor - factor 1 or 3
+   * @param nodePool - optional machine list to build a pipeline.
+   * @throws IOException
+   */
+  @Override
+  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
+      replicationType, HddsProtos.ReplicationFactor factor, HddsProtos
+      .NodePool nodePool) throws IOException {
+    PipelineRequestProto request = PipelineRequestProto.newBuilder()
+        .setNodePool(nodePool)
+        .setReplicationFactor(factor)
+        .setReplicationType(replicationType)
+        .build();
+    try {
+      PipelineResponseProto response =
+          rpcProxy.allocatePipeline(NULL_RPC_CONTROLLER, request);
+      if (response.getErrorCode() ==
+          PipelineResponseProto.Error.success) {
+        Preconditions.checkState(response.hasPipeline(), "With success, " +
+            "must come a pipeline");
+        return Pipeline.getFromProtoBuf(response.getPipeline());
+      } else {
+        String errorMessage = String.format("create replication pipeline " +
+                "failed. code : %s Message: %s", response.getErrorCode(),
+            response.hasErrorMessage() ? response.getErrorMessage() : "");
+        throw new IOException(errorMessage);
+      }
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public ScmInfo getScmInfo() throws IOException {
+    HddsProtos.GetScmInfoRequestProto request =
+        HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
+    try {
+      HddsProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo(
+          NULL_RPC_CONTROLLER, request);
+      ScmInfo.Builder builder = new ScmInfo.Builder()
+          .setClusterId(resp.getClusterId())
+          .setScmId(resp.getScmId());
+      return builder.build();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+  }
+
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return rpcProxy;
+  }
+
+  @Override
+  public void close() {
+    RPC.stopProxy(rpcProxy);
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
new file mode 100644
index 0000000..f234ad3
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos
+    .StorageContainerLocationProtocolService;
+import org.apache.hadoop.ipc.ProtocolInfo;
+
+/**
+ * Protocol used from an HDFS node to StorageContainerManager.  This extends the
+ * Protocol Buffers service interface to add Hadoop-specific annotations.
+ */
+@ProtocolInfo(protocolName =
+    "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface StorageContainerLocationProtocolPB
+    extends StorageContainerLocationProtocolService.BlockingInterface {
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
new file mode 100644
index 0000000..652ae60
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+/**
+ * This package contains classes for the client of the storage container
+ * protocol.
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
new file mode 100644
index 0000000..1559816
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -0,0 +1,396 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .StorageContainerException;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .GetKeyResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .GetSmallFileRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .GetSmallFileResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .PutSmallFileRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadChunkResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+
+import java.io.IOException;
+
+/**
+ * Implementation of all container protocol calls performed by Container
+ * clients.
+ */
+public final class ContainerProtocolCalls {
+
+  /**
+   * There is no need to instantiate this class.
+   */
+  private ContainerProtocolCalls() {
+  }
+
+  /**
+   * Calls the container protocol to get a container key.
+   *
+   * @param xceiverClient client to perform call
+   * @param containerKeyData key data to identify container
+   * @param traceID trace ID for the container protocol call
+   * @return container protocol get key response
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static GetKeyResponseProto getKey(XceiverClientSpi xceiverClient,
+      KeyData containerKeyData, String traceID) throws IOException {
+    GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyData(containerKeyData);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.GetKey)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setGetKey(readKeyRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+    return response.getGetKey();
+  }
+
+  /**
+   * Calls the container protocol to put a container key.
+   *
+   * @param xceiverClient client to perform call
+   * @param containerKeyData key data to identify container
+   * @param traceID trace ID for the container protocol call
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void putKey(XceiverClientSpi xceiverClient,
+      KeyData containerKeyData, String traceID) throws IOException {
+    PutKeyRequestProto.Builder createKeyRequest = PutKeyRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyData(containerKeyData);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.PutKey)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setPutKey(createKeyRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Calls the container protocol to read a chunk.
+   *
+   * @param xceiverClient client to perform call
+   * @param chunk information about chunk to read
+   * @param key the key name
+   * @param traceID trace ID for the container protocol call
+   * @return container protocol read chunk response
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient,
+      ChunkInfo chunk, String key, String traceID)
+      throws IOException {
+    ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyName(key)
+        .setChunkData(chunk);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.ReadChunk)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setReadChunk(readChunkRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+    return response.getReadChunk();
+  }
+
+  /**
+   * Calls the container protocol to write a chunk.
+   *
+   * @param xceiverClient client to perform call
+   * @param chunk information about chunk to write
+   * @param key the key name
+   * @param data the data of the chunk to write
+   * @param traceID trace ID for the container protocol call
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
+      String key, ByteString data, String traceID)
+      throws IOException {
+    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyName(key)
+        .setChunkData(chunk)
+        .setData(data);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.WriteChunk)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setWriteChunk(writeChunkRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Allows writing a small file using a single RPC. It takes the container
+   * name, the key name and the data to write, and sends all of it to the
+   * container in one RPC (see the usage sketch after this method). This API
+   * is designed for files smaller than 1 MB.
+   *
+   * @param client - client that communicates with the container.
+   * @param containerName - Name of the container
+   * @param key - Name of the Key
+   * @param data - Data to be written into the container.
+   * @param traceID - Trace ID for logging purpose.
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void writeSmallFile(XceiverClientSpi client,
+      String containerName, String key, byte[] data, String traceID)
+      throws IOException {
+
+    KeyData containerKeyData =
+        KeyData.newBuilder().setContainerName(containerName).setName(key)
+            .build();
+    PutKeyRequestProto.Builder createKeyRequest =
+        PutKeyRequestProto.newBuilder()
+            .setPipeline(client.getPipeline().getProtobufMessage())
+            .setKeyData(containerKeyData);
+
+    KeyValue keyValue =
+        KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
+            .build();
+    ChunkInfo chunk =
+        ChunkInfo.newBuilder().setChunkName(key + "_chunk").setOffset(0)
+            .setLen(data.length).addMetadata(keyValue).build();
+
+    PutSmallFileRequestProto putSmallFileRequest =
+        PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
+            .setKey(createKeyRequest).setData(ByteString.copyFrom(data))
+            .build();
+
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request =
+        ContainerCommandRequestProto.newBuilder()
+            .setCmdType(Type.PutSmallFile)
+            .setTraceID(traceID)
+            .setDatanodeUuid(id)
+            .setPutSmallFile(putSmallFileRequest)
+            .build();
+    ContainerCommandResponseProto response = client.sendCommand(request);
+    validateContainerResponse(response);
+  }
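+
+  /*
+   * A small-file round-trip sketch, assuming a connected XceiverClientSpi
+   * ("client") and a payload well under 1 MB; containerName, keyName and
+   * traceID are caller-chosen values:
+   *
+   *   byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
+   *   ContainerProtocolCalls.writeSmallFile(client, containerName, keyName,
+   *       payload, traceID);
+   *   GetSmallFileResponseProto smallFile = ContainerProtocolCalls
+   *       .readSmallFile(client, containerName, keyName, traceID);
+   */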
+
+  /**
+   * createContainer call that creates a container on the datanode.
+   * @param client  - client
+   * @param traceID - traceID
+   * @throws IOException
+   */
+  public static void createContainer(XceiverClientSpi client, String traceID)
+      throws IOException {
+    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
+        ContainerProtos.CreateContainerRequestProto
+            .newBuilder();
+    ContainerProtos.ContainerData.Builder containerData = ContainerProtos
+        .ContainerData.newBuilder();
+    containerData.setName(client.getPipeline().getContainerName());
+    createRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    createRequest.setContainerData(containerData.build());
+
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.CreateContainer);
+    request.setCreateContainer(createRequest);
+    request.setDatanodeUuid(id);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response = client.sendCommand(
+        request.build());
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Deletes a container from a pipeline.
+   *
+   * @param client - client that communicates with the container
+   * @param force whether or not to forcibly delete the container.
+   * @param traceID - trace ID for the container protocol call
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void deleteContainer(XceiverClientSpi client,
+      boolean force, String traceID) throws IOException {
+    ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
+        ContainerProtos.DeleteContainerRequestProto.newBuilder();
+    deleteRequest.setName(client.getPipeline().getContainerName());
+    deleteRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    deleteRequest.setForceDelete(force);
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.DeleteContainer);
+    request.setDeleteContainer(deleteRequest);
+    request.setTraceID(traceID);
+    request.setDatanodeUuid(id);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Close a container.
+   *
+   * @param client - client that communicates with the container
+   * @param traceID - trace ID for the container protocol call
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void closeContainer(XceiverClientSpi client, String traceID)
+      throws IOException {
+    ContainerProtos.CloseContainerRequestProto.Builder closeRequest =
+        ContainerProtos.CloseContainerRequestProto.newBuilder();
+    closeRequest.setPipeline(client.getPipeline().getProtobufMessage());
+
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(Type.CloseContainer);
+    request.setCloseContainer(closeRequest);
+    request.setTraceID(traceID);
+    request.setDatanodeUuid(id);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+  }
+
+  /**
+   * readContainer call that gets metadata from an existing container.
+   *
+   * @param client - client
+   * @param containerName - name of the container to read
+   * @param traceID - trace ID
+   * @return container protocol read container response
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static ReadContainerResponseProto readContainer(
+      XceiverClientSpi client, String containerName,
+      String traceID) throws IOException {
+    ReadContainerRequestProto.Builder readRequest =
+        ReadContainerRequestProto.newBuilder();
+    readRequest.setName(containerName);
+    readRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(Type.ReadContainer);
+    request.setReadContainer(readRequest);
+    request.setDatanodeUuid(id);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+    return response.getReadContainer();
+  }
+
+  /**
+   * Reads the data given the container name and key.
+   *
+   * @param client - client that communicates with the container
+   * @param containerName - name of the container
+   * @param key - key
+   * @param traceID - trace ID
+   * @return GetSmallFileResponseProto
+   * @throws IOException
+   */
+  public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
+      String containerName, String key, String traceID) throws IOException {
+    KeyData containerKeyData = KeyData
+        .newBuilder()
+        .setContainerName(containerName)
+        .setName(key).build();
+
+    GetKeyRequestProto.Builder getKey = GetKeyRequestProto
+        .newBuilder()
+        .setPipeline(client.getPipeline().getProtobufMessage())
+        .setKeyData(containerKeyData);
+    ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
+        GetSmallFileRequestProto
+            .newBuilder().setKey(getKey)
+            .build();
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.GetSmallFile)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setGetSmallFile(getSmallFileRequest)
+        .build();
+    ContainerCommandResponseProto response = client.sendCommand(request);
+    validateContainerResponse(response);
+    return response.getGetSmallFile();
+  }
+
+  /**
+   * Validates a response from a container protocol call.  Any non-successful
+   * return code is mapped to a corresponding exception and thrown.
+   *
+   * @param response container protocol call response
+   * @throws IOException if the container protocol call failed
+   */
+  private static void validateContainerResponse(
+      ContainerCommandResponseProto response
+  ) throws StorageContainerException {
+    if (response.getResult() == ContainerProtos.Result.SUCCESS) {
+      return;
+    }
+    throw new StorageContainerException(
+        response.getMessage(), response.getResult());
+  }
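+
+  /*
+   * A caller-side error handling sketch, assuming "client", "keyData" and
+   * "traceID" are set up as in the examples above; every helper in this
+   * class surfaces a non-SUCCESS result as a StorageContainerException:
+   *
+   *   try {
+   *     ContainerProtocolCalls.getKey(client, keyData, traceID);
+   *   } catch (StorageContainerException e) {
+   *     // the exception carries the non-SUCCESS ContainerProtos.Result
+   *   }
+   */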
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
new file mode 100644
index 0000000..8e98158
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+/**
+ * This package contains StorageContainerManager classes.
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
new file mode 100644
index 0000000..ff0ac4e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone;
+
+import java.util.Objects;
+
+/**
+ * OzoneAcl classes define bucket ACLs used in Ozone.
+ *
+ * ACLs in Ozone follow this pattern:
+ * <ul>
+ * <li>user:name:rw</li>
+ * <li>group:name:rw</li>
+ * <li>world::rw</li>
+ * </ul>
+ */
+public class OzoneAcl {
+  private OzoneACLType type;
+  private String name;
+  private OzoneACLRights rights;
+
+  /**
+   * Constructor for OzoneAcl.
+   */
+  public OzoneAcl() {
+  }
+
+  /**
+   * Constructor for OzoneAcl.
+   *
+   * @param type - Type
+   * @param name - Name of user
+   * @param rights - Rights
+   */
+  public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) {
+    this.name = name;
+    this.rights = rights;
+    this.type = type;
+    if (type == OzoneACLType.WORLD && name.length() != 0) {
+      throw new IllegalArgumentException("Unexpected name part in world type");
+    }
+    if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP))
+        && (name.length() == 0)) {
+      throw new IllegalArgumentException("User or group name is required");
+    }
+  }
+
+  /**
+   * Parses an ACL string and returns the ACL object.
+   *
+   * @param acl - ACL string, e.g. user:anu:rw
+   *
+   * @return - Ozone ACLs
+   */
+  public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException {
+    if ((acl == null) || acl.isEmpty()) {
+      throw new IllegalArgumentException("ACLs cannot be null or empty");
+    }
+    String[] parts = acl.trim().split(":");
+    if (parts.length < 3) {
+      throw new IllegalArgumentException("ACLs are not in expected format");
+    }
+
+    OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase());
+    OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase());
+
+    // TODO : Support sanitation of these user names by calling into
+    // userAuth Interface.
+    return new OzoneAcl(aclType, parts[1], rights);
+  }
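+
+  /*
+   * A short parsing sketch; the strings follow the pattern documented in
+   * the class Javadoc above:
+   *
+   *   OzoneAcl userAcl = OzoneAcl.parseAcl("user:anu:rw");   // USER, READ_WRITE
+   *   OzoneAcl worldAcl = OzoneAcl.parseAcl("world::r");     // WORLD, READ
+   */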
+
+  @Override
+  public String toString() {
+    return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
+  }
+
+  /**
+   * Returns a hash code value for the object. This method is
+   * supported for the benefit of hash tables.
+   *
+   * @return a hash code value for this object.
+   *
+   * @see Object#equals(Object)
+   * @see System#identityHashCode
+   */
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.getName(), this.getRights().toString(),
+                        this.getType().toString());
+  }
+
+  /**
+   * Returns name.
+   *
+   * @return name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Returns Rights.
+   *
+   * @return - Rights
+   */
+  public OzoneACLRights getRights() {
+    return rights;
+  }
+
+  /**
+   * Returns Type.
+   *
+   * @return type
+   */
+  public OzoneACLType getType() {
+    return type;
+  }
+
+  /**
+   * Indicates whether some other object is "equal to" this one.
+   *
+   * @param obj the reference object with which to compare.
+   *
+   * @return {@code true} if this object is the same as the obj
+   * argument; {@code false} otherwise.
+   */
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    OzoneAcl otherAcl = (OzoneAcl) obj;
+    return otherAcl.getName().equals(this.getName()) &&
+        otherAcl.getRights() == this.getRights() &&
+        otherAcl.getType() == this.getType();
+  }
+
+  /**
+   * ACL types.
+   */
+  public enum OzoneACLType {
+    USER(OzoneConsts.OZONE_ACL_USER_TYPE),
+    GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
+    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE);
+
+    /**
+     * String value for this Enum.
+     */
+    private final String value;
+
+    /**
+     * Init OzoneACLType enum.
+     *
+     * @param val String type for this enum.
+     */
+    OzoneACLType(String val) {
+      value = val;
+    }
+  }
+
+  /**
+   * ACL rights.
+   */
+  public enum OzoneACLRights {
+    READ, WRITE, READ_WRITE;
+
+    /**
+     * Returns the ACL rights based on passed in String.
+     *
+     * @param type ACL right string
+     *
+     * @return OzoneACLRights
+     */
+    public static OzoneACLRights getACLRight(String type) {
+      if (type == null || type.isEmpty()) {
+        throw new IllegalArgumentException("ACL right cannot be empty");
+      }
+
+      switch (type) {
+      case OzoneConsts.OZONE_ACL_READ:
+        return OzoneACLRights.READ;
+      case OzoneConsts.OZONE_ACL_WRITE:
+        return OzoneACLRights.WRITE;
+      case OzoneConsts.OZONE_ACL_READ_WRITE:
+      case OzoneConsts.OZONE_ACL_WRITE_READ:
+        return OzoneACLRights.READ_WRITE;
+      default:
+        throw new IllegalArgumentException("ACL right is not recognized");
+      }
+
+    }
+
+    /**
+     * Returns String representation of ACL rights.
+     * @param acl OzoneACLRights
+     * @return String representation of acl
+     */
+    public static String getACLRightsString(OzoneACLRights acl) {
+      switch(acl) {
+      case READ:
+        return OzoneConsts.OZONE_ACL_READ;
+      case WRITE:
+        return OzoneConsts.OZONE_ACL_WRITE;
+      case READ_WRITE:
+        return OzoneConsts.OZONE_ACL_READ_WRITE;
+      default:
+        throw new IllegalArgumentException("ACL right is not recognized");
+      }
+    }
+
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
new file mode 100644
index 0000000..72531a2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -0,0 +1,241 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+
+/**
+ * This class contains constants for configuration keys used in Ozone.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public final class OzoneConfigKeys {
+  public static final String DFS_CONTAINER_IPC_PORT =
+      "dfs.container.ipc";
+  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
+
+  /**
+   *
+   * When set to true, allocate a random free port for ozone container,
+   * so that a mini cluster is able to launch multiple containers on a node.
+   *
+   * When set to false (default), container port is fixed as specified by
+   * DFS_CONTAINER_IPC_PORT_DEFAULT.
+   */
+  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
+      "dfs.container.ipc.random.port";
+  public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
+      false;
+
+  /**
+   * Ratis Port where containers listen to.
+   */
+  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
+      "dfs.container.ratis.ipc";
+  public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
+
+  /**
+   * When set to true, allocate a random free port for ozone container, so that
+   * a mini cluster is able to launch multiple containers on a node.
+   */
+  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
+      "dfs.container.ratis.ipc.random.port";
+  public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
+      false;
+
+  public static final String OZONE_LOCALSTORAGE_ROOT =
+      "ozone.localstorage.root";
+  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
+  public static final String OZONE_ENABLED =
+      "ozone.enabled";
+  public static final boolean OZONE_ENABLED_DEFAULT = false;
+  public static final String OZONE_HANDLER_TYPE_KEY =
+      "ozone.handler.type";
+  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
+  public static final String OZONE_TRACE_ENABLED_KEY =
+      "ozone.trace.enabled";
+  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
+
+  public static final String OZONE_METADATA_DIRS =
+      "ozone.metadata.dirs";
+
+  public static final String OZONE_METADATA_STORE_IMPL =
+      "ozone.metastore.impl";
+  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
+      "LevelDB";
+  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
+      "RocksDB";
+  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
+      OZONE_METADATA_STORE_IMPL_ROCKSDB;
+
+  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
+      "ozone.metastore.rocksdb.statistics";
+
+  public static final String  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT =
+      "ALL";
+  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
+      "OFF";
+
+  public static final String OZONE_CONTAINER_CACHE_SIZE =
+      "ozone.container.cache.size";
+  public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
+
+  public static final String OZONE_SCM_BLOCK_SIZE_IN_MB =
+      "ozone.scm.block.size.in.mb";
+  public static final long OZONE_SCM_BLOCK_SIZE_DEFAULT = 256;
+
+  /**
+   * Ozone administrator users delimited by comma.
+   * If not set, only the user who launches an ozone service will be the
+   * admin user. This property must be set if ozone services are started by
+   * different users. Otherwise the RPC layer will reject calls from
+   * other servers which are started by users not in the list.
+   */
+  public static final String OZONE_ADMINISTRATORS =
+      "ozone.administrators";
+
+  public static final String OZONE_CLIENT_PROTOCOL =
+      "ozone.client.protocol";
+
+  // This defines the overall connection limit for the connection pool used in
+  // RestClient.
+  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
+      "ozone.rest.client.http.connection.max";
+  public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
+
+  // This defines the connection limit per one HTTP route/host.
+  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
+      "ozone.rest.client.http.connection.per-route.max";
+
+  public static final int
+      OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
+
+  public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
+      "ozone.client.socket.timeout";
+  public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
+  public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
+      "ozone.client.connection.timeout";
+  public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
+
+  public static final String OZONE_REPLICATION = "ozone.replication";
+  public static final int OZONE_REPLICATION_DEFAULT =
+      ReplicationFactor.THREE.getValue();
+
+  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
+  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
+      ReplicationType.RATIS.toString();
+
+  /**
+   * Configuration property to configure the cache size of client list calls.
+   */
+  public static final String OZONE_CLIENT_LIST_CACHE_SIZE =
+      "ozone.client.list.cache";
+  public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000;
+
+  /**
+   * Configuration properties for Ozone Block Deleting Service.
+   */
+  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
+      "ozone.block.deleting.service.interval";
+  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
+      = "60s";
+
+  /**
+   * The interval of open key clean service.
+   */
+  public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS =
+      "ozone.open.key.cleanup.service.interval.seconds";
+  public static final int
+      OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT
+      = 24 * 3600; // a total of 24 hours
+
+  /**
+   * An open key gets cleaned up when it has been in the open state for too long.
+   */
+  public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS =
+      "ozone.open.key.expire.threshold";
+  public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT =
+      24 * 3600;
+
+  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
+      "ozone.block.deleting.service.timeout";
+  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
+      = "300s"; // 300s for default
+
+  public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
+      "ozone.key.preallocation.maxsize";
+  public static final long OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT
+      = 128 * OzoneConsts.MB;
+
+  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
+      "ozone.block.deleting.limit.per.task";
+  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
+      = 1000;
+
+  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
+      = "ozone.block.deleting.container.limit.per.interval";
+  public static final int
+      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
+
+  public static final String OZONE_CONTAINER_REPORT_INTERVAL =
+      "ozone.container.report.interval";
+  public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
+      "60s";
+
+  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
+  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY;
+  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
+  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
+  public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
+  public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
+      = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
+  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
+      "dfs.container.ratis.datanode.storage.dir";
+
+  public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+      "ozone.web.authentication.kerberos.principal";
+
+  public static final String HDDS_DATANODE_PLUGINS_KEY =
+      "hdds.datanode.plugins";
+
+  /**
+   * There is no need to instantiate this class.
+   */
+  private OzoneConfigKeys() {
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
new file mode 100644
index 0000000..2f9e469
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Set of constants used in Ozone implementation.
+ */
+@InterfaceAudience.Private
+public final class OzoneConsts {
+
+
+  public static final String STORAGE_DIR = "scm";
+  public static final String SCM_ID = "scmUuid";
+
+  public static final String OZONE_SIMPLE_ROOT_USER = "root";
+  public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
+
+  /*
+   * The same name length limits are used for both bucket and volume names.
+   */
+  public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3;
+  public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63;
+
+  public static final String OZONE_ACL_USER_TYPE = "user";
+  public static final String OZONE_ACL_GROUP_TYPE = "group";
+  public static final String OZONE_ACL_WORLD_TYPE = "world";
+
+  public static final String OZONE_ACL_READ = "r";
+  public static final String OZONE_ACL_WRITE = "w";
+  public static final String OZONE_ACL_READ_WRITE = "rw";
+  public static final String OZONE_ACL_WRITE_READ = "wr";
+
+  public static final String OZONE_DATE_FORMAT =
+      "EEE, dd MMM yyyy HH:mm:ss zzz";
+  public static final String OZONE_TIME_ZONE = "GMT";
+
+  public static final String OZONE_COMPONENT = "component";
+  public static final String OZONE_FUNCTION  = "function";
+  public static final String OZONE_RESOURCE = "resource";
+  public static final String OZONE_USER = "user";
+  public static final String OZONE_REQUEST = "request";
+
+  public static final String CONTAINER_EXTENSION = ".container";
+  public static final String CONTAINER_META = ".meta";
+
+  //  container storage is in the following format.
+  //  Data Volume basePath/containers/<containerName>/metadata and
+  //  Data Volume basePath/containers/<containerName>/data/...
+  public static final String CONTAINER_PREFIX  = "containers";
+  public static final String CONTAINER_META_PATH = "metadata";
+  public static final String CONTAINER_DATA_PATH = "data";
+  public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
+  public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
+  public static final String CONTAINER_ROOT_PREFIX = "repository";
+
+  public static final String FILE_HASH = "SHA-256";
+  public static final String CHUNK_OVERWRITE = "OverWriteRequested";
+
+  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
+  public static final long KB = 1024L;
+  public static final long MB = KB * 1024L;
+  public static final long GB = MB * 1024L;
+  public static final long TB = GB * 1024L;
+
+  /**
+   * LevelDB names used by SCM and data nodes.
+   */
+  public static final String CONTAINER_DB_SUFFIX = "container.db";
+  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
+  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
+  public static final String BLOCK_DB = "block.db";
+  public static final String NODEPOOL_DB = "nodepool.db";
+  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
+  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
+  public static final String KSM_DB_NAME = "ksm.db";
+
+  /**
+   * Supports Bucket Versioning.
+   */
+  public enum Versioning {NOT_DEFINED, ENABLED, DISABLED}
+
+  /**
+   * Ozone handler types.
+   */
+  public static final String OZONE_HANDLER_DISTRIBUTED = "distributed";
+  public static final String OZONE_HANDLER_LOCAL = "local";
+
+  public static final String DELETING_KEY_PREFIX = "#deleting#";
+  public static final String OPEN_KEY_PREFIX = "#open#";
+  public static final String OPEN_KEY_ID_DELIMINATOR = "#";
+
+  /**
+   * KSM LevelDB prefixes.
+   *
+   * The KSM DB stores metadata as KV pairs with certain prefixes;
+   * the prefix is used to improve the performance of fetching
+   * related metadata.
+   *
+   * KSM DB Schema:
+   *  ----------------------------------------------------------
+   *  |  KEY                                     |     VALUE   |
+   *  ----------------------------------------------------------
+   *  | $userName                                |  VolumeList |
+   *  ----------------------------------------------------------
+   *  | /#volumeName                             |  VolumeInfo |
+   *  ----------------------------------------------------------
+   *  | /#volumeName/#bucketName                 |  BucketInfo |
+   *  ----------------------------------------------------------
+   *  | /volumeName/bucketName/keyName           |  KeyInfo    |
+   *  ----------------------------------------------------------
+   *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
+   *  ----------------------------------------------------------
+   */
+  public static final String KSM_VOLUME_PREFIX = "/#";
+  public static final String KSM_BUCKET_PREFIX = "/#";
+  public static final String KSM_KEY_PREFIX = "/";
+  public static final String KSM_USER_PREFIX = "$";
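+
+  /*
+   * Illustrative keys built from the prefixes above, for a hypothetical
+   * user "bilbo", volume "vol1", bucket "bucket1" and key "key1":
+   *
+   *   $bilbo                        -> VolumeList
+   *   /#vol1                        -> VolumeInfo
+   *   /#vol1/#bucket1               -> BucketInfo
+   *   /vol1/bucket1/key1            -> KeyInfo
+   *   #deleting#/vol1/bucket1/key1  -> KeyInfo queued for deletion
+   */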
+
+  /**
+   * Max KSM Quota size of 1024 PB.
+   */
+  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
+
+  /**
+   * Max number of keys returned per list buckets operation.
+   */
+  public static final int MAX_LISTBUCKETS_SIZE  = 1024;
+
+  /**
+   * Max number of keys returned per list keys operation.
+   */
+  public static final int MAX_LISTKEYS_SIZE  = 1024;
+
+  /**
+   * Max number of volumes returned per list volumes operation.
+   */
+  public static final int MAX_LISTVOLUMES_SIZE = 1024;
+
+  public static final int INVALID_PORT = -1;
+
+
+  // The ServiceListJSONServlet context attribute where KeySpaceManager
+  // instance gets stored.
+  public static final String KSM_CONTEXT_ATTRIBUTE = "ozone.ksm";
+
+  private OzoneConsts() {
+    // Never Constructed
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
new file mode 100644
index 0000000..38ce6cc
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .KeyBlocks;
+
+import java.util.List;
+
+/**
+ * A group of related blocks, e.g. blocks that belong to the same object key.
+ */
+public final class BlockGroup {
+
+  private String groupID;
+  private List<String> blockIDs;
+  private BlockGroup(String groupID, List<String> blockIDs) {
+    this.groupID = groupID;
+    this.blockIDs = blockIDs;
+  }
+
+  public List<String> getBlockIDList() {
+    return blockIDs;
+  }
+
+  public String getGroupID() {
+    return groupID;
+  }
+
+  public KeyBlocks getProto() {
+    return KeyBlocks.newBuilder().setKey(groupID)
+        .addAllBlocks(blockIDs).build();
+  }
+
+  /**
+   * Parses a KeyBlocks proto to a group of blocks.
+   * @param proto KeyBlocks proto.
+   * @return a group of blocks.
+   */
+  public static BlockGroup getFromProto(KeyBlocks proto) {
+    return BlockGroup.newBuilder().setKeyName(proto.getKey())
+        .addAllBlockIDs(proto.getBlocksList()).build();
+  }
+
+  public static Builder newBuilder() {
+    return new Builder();
+  }
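+
+  /*
+   * A builder usage sketch with hypothetical IDs:
+   *
+   *   BlockGroup group = BlockGroup.newBuilder()
+   *       .setKeyName("/vol1/bucket1/key1")
+   *       .addAllBlockIDs(Arrays.asList("block-1", "block-2"))
+   *       .build();
+   *   KeyBlocks proto = group.getProto();
+   */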
+
+  /**
+   * BlockGroup instance builder.
+   */
+  public static class Builder {
+
+    private String groupID;
+    private List<String> blockIDs;
+
+    public Builder setKeyName(String blockGroupID) {
+      this.groupID = blockGroupID;
+      return this;
+    }
+
+    public Builder addAllBlockIDs(List<String> keyBlocks) {
+      this.blockIDs = keyBlocks;
+      return this;
+    }
+
+    public BlockGroup build() {
+      return new BlockGroup(groupID, blockIDs);
+    }
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
new file mode 100644
index 0000000..ec54ac5
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmBlockResult;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmBlockResult.Result;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Result to delete a group of blocks.
+ */
+public class DeleteBlockGroupResult {
+  private String objectKey;
+  private List<DeleteBlockResult> blockResultList;
+  public DeleteBlockGroupResult(String objectKey,
+      List<DeleteBlockResult> blockResultList) {
+    this.objectKey = objectKey;
+    this.blockResultList = blockResultList;
+  }
+
+  public String getObjectKey() {
+    return objectKey;
+  }
+
+  public List<DeleteBlockResult> getBlockResultList() {
+    return blockResultList;
+  }
+
+  public List<DeleteScmBlockResult> getBlockResultProtoList() {
+    List<DeleteScmBlockResult> resultProtoList =
+        new ArrayList<>(blockResultList.size());
+    for (DeleteBlockResult result : blockResultList) {
+      DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder()
+          .setKey(result.getKey())
+          .setResult(result.getResult()).build();
+      resultProtoList.add(proto);
+    }
+    return resultProtoList;
+  }
+
+  public static List<DeleteBlockResult> convertBlockResultProto(
+      List<DeleteScmBlockResult> results) {
+    List<DeleteBlockResult> protoResults = new ArrayList<>(results.size());
+    for (DeleteScmBlockResult result : results) {
+      protoResults.add(new DeleteBlockResult(result.getKey(),
+          result.getResult()));
+    }
+    return protoResults;
+  }
+
+  /**
+   * This group is considered successfully executed only if all of its
+   * blocks were successfully deleted.
+   *
+   * @return true if all blocks are successfully deleted, false otherwise.
+   */
+  public boolean isSuccess() {
+    for (DeleteBlockResult result : blockResultList) {
+      if (result.getResult() != Result.success) {
+        return false;
+      }
+    }
+    return true;
+  }
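+
+  /*
+   * A caller-side sketch, assuming "result" is a DeleteBlockGroupResult
+   * returned from a block deletion round:
+   *
+   *   if (!result.isSuccess()) {
+   *     List<String> failed = result.getFailedBlocks();
+   *     // log or re-queue the failed block IDs
+   *   }
+   */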
+
+  /**
+   * @return a list of block IDs whose deletion failed.
+   */
+  public List<String> getFailedBlocks() {
+    List<String> failedBlocks = blockResultList.stream()
+        .filter(result -> result.getResult() != Result.success)
+        .map(DeleteBlockResult::getKey).collect(Collectors.toList());
+    return failedBlocks;
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
new file mode 100644
index 0000000..518b519
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
@@ -0,0 +1,51 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * This exception is thrown when the file system state is inconsistent
+ * and not recoverable.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class InconsistentStorageStateException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public InconsistentStorageStateException(String descr) {
+    super(descr);
+  }
+
+  public InconsistentStorageStateException(File dir, String descr) {
+    super("Directory " + getFilePath(dir) + " is in an inconsistent state: "
+        + descr);
+  }
+
+  private static String getFilePath(File dir) {
+    try {
+      return dir.getCanonicalPath();
+    } catch (IOException e) {
+      // Ignore and fall back to the non-canonical path below.
+    }
+    return dir.getPath();
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
new file mode 100644
index 0000000..fb30d92
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -0,0 +1,248 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Properties;
+
+/**
+ * Storage information file. This class defines the methods to check
+ * the consistency of the storage dir and the version file.
+ * <p>
+ * Local storage information is stored in a separate file named VERSION.
+ * It contains the type of the node, the cluster ID, the KSM/SCM state
+ * creation time, and any node-specific properties.
+ *
+ */
+@InterfaceAudience.Private
+public abstract class Storage {
+  private static final Logger LOG = LoggerFactory.getLogger(Storage.class);
+
+  protected static final String STORAGE_DIR_CURRENT = "current";
+  protected static final String STORAGE_FILE_VERSION = "VERSION";
+
+  private final NodeType nodeType;
+  private final File root;
+  private final File storageDir;
+
+  private StorageState state;
+  private StorageInfo storageInfo;
+
+
+  /**
+   * Determines the state of the Version file.
+   */
+  public enum StorageState {
+    NON_EXISTENT, NOT_INITIALIZED, INITIALIZED
+  }
+
+  public Storage(NodeType type, File root, String sdName)
+      throws IOException {
+    this.nodeType = type;
+    this.root = root;
+    this.storageDir = new File(root, sdName);
+    this.state = getStorageState();
+    if (state == StorageState.INITIALIZED) {
+      this.storageInfo = new StorageInfo(type, getVersionFile());
+    } else {
+      this.storageInfo = new StorageInfo(
+          nodeType, StorageInfo.newClusterID(), Time.now());
+      setNodeProperties();
+    }
+  }
+
+  /**
+   * Gets the path of the Storage dir.
+   * @return Storage dir path
+   */
+  public String getStorageDir() {
+    return storageDir.getAbsoluteFile().toString();
+  }
+
+  /**
+   * Gets the state of the version file.
+   * @return the state of the Version file
+   */
+  public StorageState getState() {
+    return state;
+  }
+
+  public NodeType getNodeType() {
+    return storageInfo.getNodeType();
+  }
+
+  public String getClusterID() {
+    return storageInfo.getClusterID();
+  }
+
+  public long getCreationTime() {
+    return storageInfo.getCreationTime();
+  }
+
+  public void setClusterId(String clusterId) throws IOException {
+    if (state == StorageState.INITIALIZED) {
+      throw new IOException(
+          "Storage directory " + storageDir + " already initialized.");
+    } else {
+      storageInfo.setClusterId(clusterId);
+    }
+  }
+
+  /**
+   * Retrieves the storageInfo instance to read/write the common
+   * version file properties.
+   * @return the instance of the storageInfo class
+   */
+  protected StorageInfo getStorageInfo() {
+    return storageInfo;
+  }
+
+  protected abstract Properties getNodeProperties();
+
+  /**
+   * Sets the node properties specific to KSM/SCM.
+   */
+  private void setNodeProperties() {
+    Properties nodeProperties = getNodeProperties();
+    if (nodeProperties != null) {
+      for (String key : nodeProperties.stringPropertyNames()) {
+        storageInfo.setProperty(key, nodeProperties.getProperty(key));
+      }
+    }
+  }
+
+  /**
+   * Directory {@code current} contains latest files defining
+   * the file system meta-data.
+   *
+   * @return the directory path
+   */
+  private File getCurrentDir() {
+    return new File(storageDir, STORAGE_DIR_CURRENT);
+  }
+
+  /**
+   * File {@code VERSION} contains the following fields:
+   * <ol>
+   * <li>node type</li>
+   * <li>KSM/SCM state creation time</li>
+   * <li>other fields specific for this node type</li>
+   * </ol>
+   * The version file is always written last during storage directory updates.
+   * The existence of the version file indicates that all other files have
+   * been successfully written in the storage directory, the storage is valid
+   * and does not need to be recovered.
+   *
+   * @return the version file path
+   */
+  private File getVersionFile() {
+    return new File(getCurrentDir(), STORAGE_FILE_VERSION);
+  }
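+
+  /*
+   * An illustrative VERSION file as written through StorageInfo
+   * (java.util.Properties format; the values are made up):
+   *
+   *   # <timestamp comment written by Properties.store>
+   *   nodeType=SCM
+   *   clusterID=CID-0599d1bf-4e85-4e53-9b32-37d4a3cdc351
+   *   cTime=1514764800000
+   */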
+
+
+  /**
+   * Checks whether the current/ directory is empty. This method is used
+   * before deciding to format the directory.
+   * @throws IOException if unable to list files under the directory.
+   */
+  private void checkEmptyCurrent() throws IOException {
+    File currentDir = getCurrentDir();
+    if (!currentDir.exists()) {
+      // if current/ does not exist, it's safe to format it.
+      return;
+    }
+    try (DirectoryStream<Path> dirStream = Files
+        .newDirectoryStream(currentDir.toPath())) {
+      if (dirStream.iterator().hasNext()) {
+        throw new InconsistentStorageStateException(getCurrentDir(),
+            "Can't initialize the storage directory because the current "
+                + "it is not empty.");
+      }
+    }
+  }
+
+  /**
+   * Check consistency of the storage directory.
+   *
+   * @return state {@link StorageState} of the storage directory
+   * @throws IOException
+   */
+  private StorageState getStorageState() throws IOException {
+    assert root != null : "root is null";
+    String rootPath = root.getCanonicalPath();
+    try { // check that storage exists
+      if (!root.exists()) {
+        // storage directory does not exist
+        LOG.warn("Storage directory " + rootPath + " does not exist");
+        return StorageState.NON_EXISTENT;
+      }
+      // or is inaccessible
+      if (!root.isDirectory()) {
+        LOG.warn(rootPath + "is not a directory");
+        return StorageState.NON_EXISTENT;
+      }
+      if (!FileUtil.canWrite(root)) {
+        LOG.warn("Cannot access storage directory " + rootPath);
+        return StorageState.NON_EXISTENT;
+      }
+    } catch (SecurityException ex) {
+      LOG.warn("Cannot access storage directory " + rootPath, ex);
+      return StorageState.NON_EXISTENT;
+    }
+
+    // check whether current directory is valid
+    File versionFile = getVersionFile();
+    boolean hasCurrent = versionFile.exists();
+
+    if (hasCurrent) {
+      return StorageState.INITIALIZED;
+    } else {
+      checkEmptyCurrent();
+      return StorageState.NOT_INITIALIZED;
+    }
+  }
+
+  /**
+   * Creates the Version file if not present,
+   * otherwise throws an IOException.
+   * @throws IOException
+   */
+  public void initialize() throws IOException {
+    if (state == StorageState.INITIALIZED) {
+      throw new IOException("Storage directory already initialized.");
+    }
+    if (!getCurrentDir().mkdirs()) {
+      throw new IOException("Cannot create directory " + getCurrentDir());
+    }
+    storageInfo.writeTo(getVersionFile());
+  }
+
+}
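+
+/*
+ * A minimal concrete subclass sketch (hypothetical, for illustration only),
+ * showing how a service could supply its node-specific properties and
+ * initialize the VERSION file:
+ *
+ *   public class DemoStorage extends Storage {
+ *     public DemoStorage(File root) throws IOException {
+ *       super(NodeType.SCM, root, "demo");
+ *     }
+ *
+ *     @Override
+ *     protected Properties getNodeProperties() {
+ *       Properties props = new Properties();
+ *       props.setProperty("demoKey", "demoValue");
+ *       return props;
+ *     }
+ *   }
+ *
+ *   // then: new DemoStorage(rootDir).initialize();
+ */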
+
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
new file mode 100644
index 0000000..0e98a4c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Properties;
+import java.util.UUID;
+
+/**
+ * Common class for storage information. This class defines the common
+ * properties and functions to set them, write them into the version file,
+ * and read them from the version file.
+ *
+ */
+@InterfaceAudience.Private
+public class StorageInfo {
+
+  private Properties properties = new Properties();
+
+  /**
+   * Property to hold node type.
+   */
+  private static final String NODE_TYPE = "nodeType";
+  /**
+   * Property to hold ID of the cluster.
+   */
+  private static final String CLUSTER_ID = "clusterID";
+  /**
+   * Property to hold creation time of the storage.
+   */
+  private static final String CREATION_TIME = "cTime";
+
+  /**
+   * Constructs StorageInfo instance.
+   * @param type
+   *          Type of the node using the storage
+   * @param cid
+   *          Cluster ID
+   * @param cT
+   *          Cluster creation time
+   *
+   * @throws IOException
+   */
+  public StorageInfo(NodeType type, String cid, long cT)
+      throws IOException {
+    Preconditions.checkNotNull(type);
+    Preconditions.checkNotNull(cid);
+    Preconditions.checkNotNull(cT);
+    properties.setProperty(NODE_TYPE, type.name());
+    properties.setProperty(CLUSTER_ID, cid);
+    properties.setProperty(CREATION_TIME, String.valueOf(cT));
+  }
+
+  public StorageInfo(NodeType type, File propertiesFile)
+      throws IOException {
+    this.properties = readFrom(propertiesFile);
+    verifyNodeType(type);
+    verifyClusterId();
+    verifyCreationTime();
+  }
+
+  public NodeType getNodeType() {
+    return NodeType.valueOf(properties.getProperty(NODE_TYPE));
+  }
+
+  public String getClusterID() {
+    return properties.getProperty(CLUSTER_ID);
+  }
+
+  public Long getCreationTime() {
+    String creationTime = properties.getProperty(CREATION_TIME);
+    if(creationTime != null) {
+      return Long.parseLong(creationTime);
+    }
+    return null;
+  }
+
+  public String getProperty(String key) {
+    return properties.getProperty(key);
+  }
+
+  public void setProperty(String key, String value) {
+    properties.setProperty(key, value);
+  }
+
+  public void setClusterId(String clusterId) {
+    properties.setProperty(CLUSTER_ID, clusterId);
+  }
+
+  private void verifyNodeType(NodeType type)
+      throws InconsistentStorageStateException {
+    NodeType nodeType = getNodeType();
+    Preconditions.checkNotNull(nodeType);
+    if(type != nodeType) {
+      throw new InconsistentStorageStateException("Expected NodeType: " + type +
+          ", but found: " + nodeType);
+    }
+  }
+
+  private void verifyClusterId()
+      throws InconsistentStorageStateException {
+    String clusterId = getClusterID();
+    Preconditions.checkNotNull(clusterId);
+    if(clusterId.isEmpty()) {
+      throw new InconsistentStorageStateException("Cluster ID not found");
+    }
+  }
+
+  private void verifyCreationTime() {
+    Long creationTime = getCreationTime();
+    Preconditions.checkNotNull(creationTime);
+  }
+
+
+  public void writeTo(File to)
+      throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
+         FileOutputStream out = new FileOutputStream(file.getFD())) {
+      file.seek(0);
+      /*
+       * If the server is interrupted before this line,
+       * the version file will remain unchanged.
+       */
+      properties.store(out, null);
+      /*
+       * Now the new fields are flushed to the head of the file, but the file
+       * length can still be larger than required, so the file may contain
+       * whole or corrupted fields from its old contents at the end.
+       * If the server is interrupted here and restarted later, these extra
+       * fields either should not affect server behavior or should be handled
+       * by the server correctly.
+       */
+      file.setLength(out.getChannel().position());
+    }
+  }
+
+  private Properties readFrom(File from) throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(from, "rws");
+        FileInputStream in = new FileInputStream(file.getFD())) {
+      Properties props = new Properties();
+      file.seek(0);
+      props.load(in);
+      return props;
+    }
+  }
+
+  /**
+   * Generate new clusterID.
+   *
+   * clusterID is a persistent attribute of the cluster.
+   * It is generated when the cluster is created and remains the same
+   * during the life cycle of the cluster.  When a new SCM node is initialized,
+   * if this is a new cluster, a new clusterID is generated and stored.
+   * @return new clusterID
+   */
+  public static String newClusterID() {
+    return "CID-" + UUID.randomUUID().toString();
+  }
+
+}
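As a rough usage sketch for the class above (not part of this patch: the class name, the temporary path and the choice of NodeType.SCM are illustrative assumptions), a version file can be written and read back like this:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.ozone.common.StorageInfo;

public final class StorageInfoExample {
  public static void main(String[] args) throws IOException {
    // Storage info for a hypothetical SCM node with a freshly generated
    // cluster ID and the current time as creation time.
    StorageInfo info = new StorageInfo(
        NodeType.SCM, StorageInfo.newClusterID(), System.currentTimeMillis());

    // Persist the properties into a VERSION-style file.
    File versionFile = new File("/tmp/ozone-example", "VERSION");
    versionFile.getParentFile().mkdirs();
    info.writeTo(versionFile);

    // The file-based constructor re-reads and verifies the stored properties.
    StorageInfo loaded = new StorageInfo(NodeType.SCM, versionFile);
    System.out.println("clusterID=" + loaded.getClusterID()
        + " cTime=" + loaded.getCreationTime());
  }
}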
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
new file mode 100644
index 0000000..6517e58
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
new file mode 100644
index 0000000..9aeff24
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.common.statemachine;
+
+/**
+ * Exception thrown when an invalid state transition is attempted.
+ */
+public class InvalidStateTransitionException extends Exception {
+  private Enum<?> currentState;
+  private Enum<?> event;
+
+  public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
+    super("Invalid event: " + event + " at " + currentState + " state.");
+    this.currentState = currentState;
+    this.event = event;
+  }
+
+  public Enum<?> getCurrentState() {
+    return currentState;
+  }
+
+  public Enum<?> getEvent() {
+    return event;
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
new file mode 100644
index 0000000..bf8cbd5
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.common.statemachine;
+
+import com.google.common.base.Supplier;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Template class that implements a simple event-driven state machine.
+ * @param <STATE> states allowed
+ * @param <EVENT> events allowed
+ */
+public class StateMachine<STATE extends Enum<?>, EVENT extends Enum<?>> {
+  private STATE initialState;
+  private Set<STATE> finalStates;
+
+  private final LoadingCache<EVENT, Map<STATE, STATE>> transitions =
+      CacheBuilder.newBuilder().build(
+          CacheLoader.from((Supplier<Map<STATE, STATE>>) () -> new HashMap<>()));
+
+  public StateMachine(STATE initState, Set<STATE> finalStates) {
+    this.initialState = initState;
+    this.finalStates = finalStates;
+  }
+
+  public STATE getInitialState() {
+    return initialState;
+  }
+
+  public Set<STATE> getFinalStates() {
+    return finalStates;
+  }
+
+  public STATE getNextState(STATE from, EVENT e)
+      throws InvalidStateTransitionException {
+    STATE target = transitions.getUnchecked(e).get(from);
+    if (target == null) {
+      throw new InvalidStateTransitionException(from, e);
+    }
+    return target;
+  }
+
+  public void addTransition(STATE from, STATE to, EVENT e) {
+    transitions.getUnchecked(e).put(from, to);
+  }
+}
\ No newline at end of file
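A minimal sketch of how the state machine template above can be used (the State and Event enums are hypothetical, chosen only to illustrate addTransition and getNextState):

import java.util.EnumSet;

import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;

public final class StateMachineExample {
  // Hypothetical states and events, for illustration only.
  enum State { INIT, RUNNING, CLOSED }
  enum Event { START, STOP }

  public static void main(String[] args)
      throws InvalidStateTransitionException {
    StateMachine<State, Event> sm =
        new StateMachine<>(State.INIT, EnumSet.of(State.CLOSED));
    sm.addTransition(State.INIT, State.RUNNING, Event.START);
    sm.addTransition(State.RUNNING, State.CLOSED, Event.STOP);

    // Registered transition: INIT + START -> RUNNING.
    System.out.println(sm.getNextState(State.INIT, Event.START));

    // Unregistered transition: throws InvalidStateTransitionException.
    try {
      sm.getNextState(State.INIT, Event.STOP);
    } catch (InvalidStateTransitionException e) {
      System.out.println(e.getMessage());
    }
  }
}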
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
new file mode 100644
index 0000000..045409e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common.statemachine;
+/**
+ State machine template classes for ozone.
+ **/
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
new file mode 100644
index 0000000..aa1fe74
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Java class that represents the ChunkInfo ProtoBuf message. This helper
+ * class allows us to convert between the protobuf and plain Java
+ * representations.
+ */
+public class ChunkInfo {
+  private final String chunkName;
+  private final long offset;
+  private final long len;
+  private String checksum;
+  private final Map<String, String> metadata;
+
+
+  /**
+   * Constructs a ChunkInfo.
+   *
+   * @param chunkName - File Name where chunk lives.
+   * @param offset    - offset where Chunk Starts.
+   * @param len       - Length of the Chunk.
+   */
+  public ChunkInfo(String chunkName, long offset, long len) {
+    this.chunkName = chunkName;
+    this.offset = offset;
+    this.len = len;
+    this.metadata = new TreeMap<>();
+  }
+
+  /**
+   * Adds metadata.
+   *
+   * @param key   - Key Name.
+   * @param value - Value.
+   * @throws IOException
+   */
+  public void addMetadata(String key, String value) throws IOException {
+    synchronized (this.metadata) {
+      if (this.metadata.containsKey(key)) {
+        throw new IOException("This key already exists. Key " + key);
+      }
+      metadata.put(key, value);
+    }
+  }
+
+  /**
+   * Gets a ChunkInfo object from the protobuf definitions.
+   *
+   * @param info - Protobuf class
+   * @return ChunkInfo
+   * @throws IOException
+   */
+  public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info)
+      throws IOException {
+    Preconditions.checkNotNull(info);
+
+    ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(),
+        info.getLen());
+
+    for (int x = 0; x < info.getMetadataCount(); x++) {
+      chunkInfo.addMetadata(info.getMetadata(x).getKey(),
+          info.getMetadata(x).getValue());
+    }
+
+
+    if (info.hasChecksum()) {
+      chunkInfo.setChecksum(info.getChecksum());
+    }
+    return chunkInfo;
+  }
+
+  /**
+   * Returns a ProtoBuf Message from ChunkInfo.
+   *
+   * @return Protocol Buffer Message
+   */
+  public ContainerProtos.ChunkInfo getProtoBufMessage() {
+    ContainerProtos.ChunkInfo.Builder builder = ContainerProtos
+        .ChunkInfo.newBuilder();
+
+    builder.setChunkName(this.getChunkName());
+    builder.setOffset(this.getOffset());
+    builder.setLen(this.getLen());
+    if (this.getChecksum() != null && !this.getChecksum().isEmpty()) {
+      builder.setChecksum(this.getChecksum());
+    }
+
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      HddsProtos.KeyValue.Builder keyValBuilder =
+          HddsProtos.KeyValue.newBuilder();
+      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
+          .setValue(entry.getValue()).build());
+    }
+
+    return builder.build();
+  }
+
+  /**
+   * Returns the chunkName.
+   *
+   * @return - String
+   */
+  public String getChunkName() {
+    return chunkName;
+  }
+
+  /**
+   * Gets the start offset of the given chunk in physical file.
+   *
+   * @return - long
+   */
+  public long getOffset() {
+    return offset;
+  }
+
+  /**
+   * Returns the length of the Chunk.
+   *
+   * @return long
+   */
+  public long getLen() {
+    return len;
+  }
+
+  /**
+   * Returns the SHA256 value of this chunk.
+   *
+   * @return - Hash String
+   */
+  public String getChecksum() {
+    return checksum;
+  }
+
+  /**
+   * Sets the Hash value of this chunk.
+   *
+   * @param checksum - Hash String.
+   */
+  public void setChecksum(String checksum) {
+    this.checksum = checksum;
+  }
+
+  /**
+   * Returns Metadata associated with this Chunk.
+   *
+   * @return - Map of Key,values.
+   */
+  public Map<String, String> getMetadata() {
+    return metadata;
+  }
+
+  @Override
+  public String toString() {
+    return "ChunkInfo{" +
+        "chunkName='" + chunkName +
+        ", offset=" + offset +
+        ", len=" + len +
+        '}';
+  }
+}
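A small illustrative round-trip through the helper above (chunk name, offset, length and checksum are made-up values):

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

public final class ChunkInfoExample {
  public static void main(String[] args) throws IOException {
    // A 4 MB chunk starting at offset 0 of an illustrative chunk file.
    ChunkInfo chunk = new ChunkInfo("key_chunk_0", 0, 4L * 1024 * 1024);
    chunk.setChecksum("0123abcd");        // placeholder hash string
    chunk.addMetadata("volume", "vol1");  // duplicate keys throw IOException

    // Convert to the protobuf message and back again.
    ContainerProtos.ChunkInfo proto = chunk.getProtoBufMessage();
    ChunkInfo copy = ChunkInfo.getFromProtoBuf(proto);
    System.out.println(copy);  // ChunkInfo{chunkName='key_chunk_0', ...}
  }
}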
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
new file mode 100644
index 0000000..be546c7
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Helper class that represents KeyData and converts it to and from its
+ * protobuf representation.
+ */
+public class KeyData {
+  private final String containerName;
+  private final String keyName;
+  private final Map<String, String> metadata;
+
+  /**
+   * Please note: when we are working with keys, we don't care what they
+   * point to, so we neither read nor validate the chunk info. That is the
+   * responsibility of higher layers like ozone; here we just read and write
+   * data from the network.
+   */
+  private List<ContainerProtos.ChunkInfo> chunks;
+
+  /**
+   * Constructs a KeyData Object.
+   *
+   * @param containerName - Name of the container.
+   * @param keyName - Name of the key.
+   */
+  public KeyData(String containerName, String keyName) {
+    this.containerName = containerName;
+    this.keyName = keyName;
+    this.metadata = new TreeMap<>();
+  }
+
+  /**
+   * Returns a KeyData object from the protobuf data.
+   *
+   * @param data - Protobuf data.
+   * @return - KeyData
+   * @throws IOException
+   */
+  public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws
+      IOException {
+    KeyData keyData = new KeyData(data.getContainerName(), data.getName());
+    for (int x = 0; x < data.getMetadataCount(); x++) {
+      keyData.addMetadata(data.getMetadata(x).getKey(),
+          data.getMetadata(x).getValue());
+    }
+    keyData.setChunks(data.getChunksList());
+    return keyData;
+  }
+
+  /**
+   * Returns a Protobuf message from KeyData.
+   * @return Proto Buf Message.
+   */
+  public ContainerProtos.KeyData getProtoBufMessage() {
+    ContainerProtos.KeyData.Builder builder =
+        ContainerProtos.KeyData.newBuilder();
+    builder.setContainerName(this.containerName);
+    builder.setName(this.getKeyName());
+    builder.addAllChunks(this.chunks);
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      HddsProtos.KeyValue.Builder keyValBuilder =
+          HddsProtos.KeyValue.newBuilder();
+      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
+          .setValue(entry.getValue()).build());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Adds metadata.
+   *
+   * @param key   - Key
+   * @param value - Value
+   * @throws IOException
+   */
+  public synchronized void addMetadata(String key, String value) throws
+      IOException {
+    if (this.metadata.containsKey(key)) {
+      throw new IOException("This key already exists. Key " + key);
+    }
+    metadata.put(key, value);
+  }
+
+  public synchronized Map<String, String> getMetadata() {
+    return Collections.unmodifiableMap(this.metadata);
+  }
+
+  /**
+   * Returns the value of the given metadata key.
+   */
+  public synchronized String getValue(String key) {
+    return metadata.get(key);
+  }
+
+  /**
+   * Deletes a metadata entry from the map.
+   *
+   * @param key - Key
+   */
+  public synchronized void deleteKey(String key) {
+    metadata.remove(key);
+  }
+
+  /**
+   * Returns chunks list.
+   *
+   * @return list of chunkinfo.
+   */
+  public List<ContainerProtos.ChunkInfo> getChunks() {
+    return chunks;
+  }
+
+  /**
+   * Returns container Name.
+   * @return String.
+   */
+  public String getContainerName() {
+    return containerName;
+  }
+
+  /**
+   * Returns KeyName.
+   * @return String.
+   */
+  public String getKeyName() {
+    return keyName;
+  }
+
+  /**
+   * Sets Chunk list.
+   *
+   * @param chunks - List of chunks.
+   */
+  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
+    this.chunks = chunks;
+  }
+
+  /**
+   * Get the total size of chunks allocated for the key.
+   * @return total size of the key.
+   */
+  public long getSize() {
+    return chunks.parallelStream().mapToLong(e -> e.getLen()).sum();
+  }
+
+}
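A corresponding sketch for KeyData (container and key names are illustrative; getChunksCount is assumed to be the standard protobuf-generated accessor for the repeated chunks field):

import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;

public final class KeyDataExample {
  public static void main(String[] args) throws IOException {
    KeyData keyData = new KeyData("container-1", "bucket/key-1");
    keyData.addMetadata("owner", "ozone");

    // Attach a single chunk; the key layer does not interpret chunk contents.
    ChunkInfo chunk = new ChunkInfo("key-1_chunk_0", 0, 1024);
    keyData.setChunks(
        Collections.singletonList(chunk.getProtoBufMessage()));

    System.out.println("total size = " + keyData.getSize());  // 1024
    ContainerProtos.KeyData proto = keyData.getProtoBufMessage();
    System.out.println("chunks in proto = " + proto.getChunksCount());
  }
}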
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
new file mode 100644
index 0000000..fa5df11
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+/**
+ * Helper classes for the container protocol communication.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
new file mode 100644
index 0000000..dfa9315
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+import org.apache.hadoop.util.Time;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * This class represents a lease created on a resource. Callbacks can be
+ * registered on the lease and will be executed in case of timeout.
+ *
+ * @param <T> Resource type for which the lease can be associated
+ */
+public class Lease<T> {
+
+  /**
+   * The resource for which this lease is created.
+   */
+  private final T resource;
+
+  private final long creationTime;
+
+  /**
+   * Lease lifetime in milliseconds.
+   */
+  private volatile long leaseTimeout;
+
+  private boolean expired;
+
+  /**
+   * Functions to be called in case of timeout.
+   */
+  private List<Callable<Void>> callbacks;
+
+
+  /**
+   * Creates a lease on the specified resource with given timeout.
+   *
+   * @param resource
+   *        Resource for which the lease has to be created
+   * @param timeout
+   *        Lease lifetime in milliseconds
+   */
+  public Lease(T resource, long timeout) {
+    this.resource = resource;
+    this.leaseTimeout = timeout;
+    this.callbacks = Collections.synchronizedList(new ArrayList<>());
+    this.creationTime = Time.monotonicNow();
+    this.expired = false;
+  }
+
+  /**
+   * Returns true if the lease has expired, else false.
+   *
+   * @return true if expired, else false
+   */
+  public boolean hasExpired() {
+    return expired;
+  }
+
+  /**
+   * Registers a callback which will be executed in case of timeout. Callbacks
+   * are executed in a separate Thread.
+   *
+   * @param callback
+   *        The Callable which has to be executed
+   * @throws LeaseExpiredException
+   *         If the lease has already timed out
+   */
+  public void registerCallBack(Callable<Void> callback)
+      throws LeaseExpiredException {
+    if(hasExpired()) {
+      throw new LeaseExpiredException("Resource: " + resource);
+    }
+    callbacks.add(callback);
+  }
+
+  /**
+   * Returns the time elapsed since the creation of lease.
+   *
+   * @return elapsed time in milliseconds
+   * @throws LeaseExpiredException
+   *         If the lease has already timed out
+   */
+  public long getElapsedTime() throws LeaseExpiredException {
+    if(hasExpired()) {
+      throw new LeaseExpiredException("Resource: " + resource);
+    }
+    return Time.monotonicNow() - creationTime;
+  }
+
+  /**
+   * Returns the time available before timeout.
+   *
+   * @return remaining time in milliseconds
+   * @throws LeaseExpiredException
+   *         If the lease has already timed out
+   */
+  public long getRemainingTime() throws LeaseExpiredException {
+    if(hasExpired()) {
+      throw new LeaseExpiredException("Resource: " + resource);
+    }
+    return leaseTimeout - getElapsedTime();
+  }
+
+  /**
+   * Returns total lease lifetime.
+   *
+   * @return total lifetime of lease in milliseconds
+   * @throws LeaseExpiredException
+   *         If the lease has already timed out
+   */
+  public long getLeaseLifeTime() throws LeaseExpiredException {
+    if(hasExpired()) {
+      throw new LeaseExpiredException("Resource: " + resource);
+    }
+    return leaseTimeout;
+  }
+
+  /**
+   * Renews the lease timeout period.
+   *
+   * @param timeout
+   *        Time to be added to the lease in milliseconds
+   * @throws LeaseExpiredException
+   *         If the lease has already timed out
+   */
+  public void renew(long timeout) throws LeaseExpiredException {
+    if(hasExpired()) {
+      throw new LeaseExpiredException("Resource: " + resource);
+    }
+    leaseTimeout += timeout;
+  }
+
+  @Override
+  public int hashCode() {
+    return resource.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if(obj instanceof Lease) {
+      return resource.equals(((Lease) obj).resource);
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return "Lease<" + resource.toString() + ">";
+  }
+
+  /**
+   * Returns the callbacks to be executed for the lease in case of timeout.
+   *
+   * @return callbacks to be executed
+   */
+  List<Callable<Void>> getCallbacks() {
+    return callbacks;
+  }
+
+  /**
+   * Expires/Invalidates the lease.
+   */
+  void invalidate() {
+    callbacks = null;
+    expired = true;
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java
new file mode 100644
index 0000000..a39ea22
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+/**
+ * This exception represents that there is already a lease acquired on the
+ * same resource.
+ */
+public class LeaseAlreadyExistException extends LeaseException {
+
+  /**
+   * Constructs a {@code LeaseAlreadyExistException} with {@code null}
+   * as its error detail message.
+   */
+  public LeaseAlreadyExistException() {
+    super();
+  }
+
+  /**
+   * Constructs a {@code LeaseAlreadyExistException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public LeaseAlreadyExistException(String message) {
+    super(message);
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
new file mode 100644
index 0000000..1b7391b
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * This class is responsible for executing the callbacks of a lease in case of
+ * timeout.
+ */
+public class LeaseCallbackExecutor<T> implements Runnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Lease.class);
+
+  private final T resource;
+  private final List<Callable<Void>> callbacks;
+
+  /**
+   * Constructs LeaseCallbackExecutor instance with list of callbacks.
+   *
+   * @param resource
+   *        The resource for which the callbacks are executed
+   * @param callbacks
+   *        Callbacks to be executed by this executor
+   */
+  public LeaseCallbackExecutor(T resource, List<Callable<Void>> callbacks) {
+    this.resource = resource;
+    this.callbacks = callbacks;
+  }
+
+  @Override
+  public void run() {
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Executing callbacks for lease on {}", resource);
+    }
+    for(Callable<Void> callback : callbacks) {
+      try {
+        callback.call();
+      } catch (Exception e) {
+        LOG.warn("Exception while executing callback for lease on {}",
+            resource, e);
+      }
+    }
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
new file mode 100644
index 0000000..418f412
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+/**
+ * This exception represents all lease related exceptions.
+ */
+public class LeaseException extends Exception {
+
+  /**
+   * Constructs a {@code LeaseException} with {@code null}
+   * as its error detail message.
+   */
+  public LeaseException() {
+    super();
+  }
+
+  /**
+   * Constructs a {@code LeaseException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public LeaseException(String message) {
+    super(message);
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java
new file mode 100644
index 0000000..440a023
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+/**
+ * This exception represents that the lease that is being accessed has expired.
+ */
+public class LeaseExpiredException extends LeaseException {
+
+  /**
+   * Constructs a {@code LeaseExpiredException} with {@code null}
+   * as its error detail message.
+   */
+  public LeaseExpiredException() {
+    super();
+  }
+
+  /**
+   * Constructs a {@code LeaseExpiredException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public LeaseExpiredException(String message) {
+    super(message);
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
new file mode 100644
index 0000000..b8390dd
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * LeaseManager is someone who can provide you leases based on your
+ * requirement. If you want to return the lease back before it expires,
+ * you can give it back to Lease Manager. He is the one responsible for
+ * the lifecycle of leases. The resource for which lease is created
+ * should have proper {@code equals} method implementation, resource
+ * equality is checked while the lease is created.
+ *
+ * @param <T> Type of leases that this lease manager can create
+ */
+public class LeaseManager<T> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(LeaseManager.class);
+
+  private final long defaultTimeout;
+  private Map<T, Lease<T>> activeLeases;
+  private LeaseMonitor leaseMonitor;
+  private Thread leaseMonitorThread;
+  private boolean isRunning;
+
+  /**
+   * Creates an instance of lease manager.
+   *
+   * @param defaultTimeout
+   *        Default timeout in milliseconds to be used for lease creation.
+   */
+  public LeaseManager(long defaultTimeout) {
+    this.defaultTimeout = defaultTimeout;
+  }
+
+  /**
+   * Starts the lease manager service.
+   */
+  public void start() {
+    LOG.debug("Starting LeaseManager service");
+    activeLeases = new ConcurrentHashMap<>();
+    leaseMonitor = new LeaseMonitor();
+    leaseMonitorThread = new Thread(leaseMonitor);
+    leaseMonitorThread.setName("LeaseManager#LeaseMonitor");
+    leaseMonitorThread.setDaemon(true);
+    leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> {
+      // Let us just restart this thread after logging an error.
+      // if this thread is not running we cannot handle Lease expiry.
+      LOG.error("LeaseMonitor thread encountered an error. Thread: {}",
+          thread.toString(), throwable);
+      leaseMonitorThread.start();
+    });
+    LOG.debug("Starting LeaseManager#LeaseMonitor Thread");
+    leaseMonitorThread.start();
+    isRunning = true;
+  }
+
+  /**
+   * Returns a lease for the specified resource with default timeout.
+   *
+   * @param resource
+   *        Resource for which lease has to be created
+   * @throws LeaseAlreadyExistException
+   *         If there is already a lease on the resource
+   */
+  public synchronized Lease<T> acquire(T resource)
+      throws LeaseAlreadyExistException {
+    return acquire(resource, defaultTimeout);
+  }
+
+  /**
+   * Returns a lease for the specified resource with the timeout provided.
+   *
+   * @param resource
+   *        Resource for which lease has to be created
+   * @param timeout
+   *        The timeout in milliseconds which has to be set on the lease
+   * @throws LeaseAlreadyExistException
+   *         If there is already a lease on the resource
+   */
+  public synchronized Lease<T> acquire(T resource, long timeout)
+      throws LeaseAlreadyExistException {
+    checkStatus();
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout);
+    }
+    if(activeLeases.containsKey(resource)) {
+      throw new LeaseAlreadyExistException("Resource: " + resource);
+    }
+    Lease<T> lease = new Lease<>(resource, timeout);
+    activeLeases.put(resource, lease);
+    leaseMonitorThread.interrupt();
+    return lease;
+  }
+
+  /**
+   * Returns a lease associated with the specified resource.
+   *
+   * @param resource
+   *        Resource for which the lease has to be returned
+   * @throws LeaseNotFoundException
+   *         If there is no active lease on the resource
+   */
+  public Lease<T> get(T resource) throws LeaseNotFoundException {
+    checkStatus();
+    Lease<T> lease = activeLeases.get(resource);
+    if(lease != null) {
+      return lease;
+    }
+    throw new LeaseNotFoundException("Resource: " + resource);
+  }
+
+  /**
+   * Releases the lease associated with the specified resource.
+   *
+   * @param resource
+   *        The resource for which the lease has to be released
+   * @throws LeaseNotFoundException
+   *         If there is no active lease on the resource
+   */
+  public synchronized void release(T resource)
+      throws LeaseNotFoundException {
+    checkStatus();
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Releasing lease on {}", resource);
+    }
+    Lease<T> lease = activeLeases.remove(resource);
+    if(lease == null) {
+      throw new LeaseNotFoundException("Resource: " + resource);
+    }
+    lease.invalidate();
+  }
+
+  /**
+   * Shuts down the LeaseManager and releases the resources. All the active
+   * {@link Lease} will be released (callbacks on leases will not be
+   * executed).
+   */
+  public void shutdown() {
+    checkStatus();
+    LOG.debug("Shutting down LeaseManager service");
+    leaseMonitor.disable();
+    leaseMonitorThread.interrupt();
+    for(T resource : activeLeases.keySet()) {
+      try {
+        release(resource);
+      }  catch(LeaseNotFoundException ex) {
+        //Ignore the exception, someone might have released the lease
+      }
+    }
+    isRunning = false;
+  }
+
+  /**
+   * Throws {@link LeaseManagerNotRunningException} if the service is not
+   * running.
+   */
+  private void checkStatus() {
+    if(!isRunning) {
+      throw new LeaseManagerNotRunningException("LeaseManager not running.");
+    }
+  }
+
+  /**
+   * Monitors the leases and expires them based on the timeout, also
+   * responsible for executing the callbacks of expired leases.
+   */
+  private final class LeaseMonitor implements Runnable {
+
+    private boolean monitor = true;
+    private ExecutorService executorService;
+
+    private LeaseMonitor() {
+      this.monitor = true;
+      this.executorService = Executors.newCachedThreadPool();
+    }
+
+    @Override
+    public void run() {
+      while(monitor) {
+        LOG.debug("LeaseMonitor: checking for lease expiry");
+        long sleepTime = Long.MAX_VALUE;
+
+        for (T resource : activeLeases.keySet()) {
+          try {
+            Lease<T> lease = get(resource);
+            long remainingTime = lease.getRemainingTime();
+            if (remainingTime <= 0) {
+              //Lease has timed out
+              List<Callable<Void>> leaseCallbacks = lease.getCallbacks();
+              release(resource);
+              executorService.execute(
+                  new LeaseCallbackExecutor(resource, leaseCallbacks));
+            } else {
+              sleepTime = Math.min(sleepTime, remainingTime);
+            }
+          } catch (LeaseNotFoundException | LeaseExpiredException ex) {
+            //Ignore the exception, someone might have released the lease
+          }
+        }
+
+        try {
+          if(!Thread.interrupted()) {
+            Thread.sleep(sleepTime);
+          }
+        } catch (InterruptedException ignored) {
+          // This means a new lease is added to activeLeases.
+        }
+      }
+    }
+
+    /**
+     * Disables lease monitor, next interrupt call on the thread
+     * will stop lease monitor.
+     */
+    public void disable() {
+      monitor = false;
+    }
+  }
+
+}
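A minimal usage sketch for the lease API above (the 500 ms timeouts and the string resource are illustrative; in practice the resource is whatever object the service tracks, as long as it implements equals properly):

import org.apache.hadoop.ozone.lease.Lease;
import org.apache.hadoop.ozone.lease.LeaseManager;

public final class LeaseManagerExample {
  public static void main(String[] args) throws Exception {
    LeaseManager<String> manager = new LeaseManager<>(500);
    manager.start();

    // Acquire a lease with the default timeout.
    Lease<String> lease = manager.acquire("container-42");

    // The callback runs on a separate thread if the lease times out.
    lease.registerCallBack(() -> {
      System.out.println("lease on container-42 expired");
      return null;
    });

    System.out.println("remaining ms: " + lease.getRemainingTime());

    // Extend the lease, or release it explicitly before it expires.
    lease.renew(500);
    manager.release("container-42");

    manager.shutdown();
  }
}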
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java
new file mode 100644
index 0000000..ced31de
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+/**
+ * This exception indicates that the LeaseManager service is not running.
+ */
+public class LeaseManagerNotRunningException extends RuntimeException {
+
+  /**
+   * Constructs a {@code LeaseManagerNotRunningException} with {@code null}
+   * as its error detail message.
+   */
+  public LeaseManagerNotRunningException() {
+    super();
+  }
+
+  /**
+   * Constructs a {@code LeaseManagerNotRunningException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public LeaseManagerNotRunningException(String message) {
+    super(message);
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java
new file mode 100644
index 0000000..c292d33
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.lease;
+
+/**
+ * This exception represents that the lease that is being accessed does not
+ * exist.
+ */
+public class LeaseNotFoundException extends LeaseException {
+
+  /**
+   * Constructs a {@code LeaseNotFoundException} with {@code null}
+   * as its error detail message.
+   */
+  public LeaseNotFoundException() {
+    super();
+  }
+
+  /**
+   * Constructs a {@code LeaseNotFoundException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public LeaseNotFoundException(String message) {
+    super(message);
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java
new file mode 100644
index 0000000..48ee2e1
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+/**
+ * A generic lease management API which can be used if a service
+ * needs any kind of lease management.
+ */
+
+package org.apache.hadoop.ozone.lease;
+/*
+ This package contains lease management related classes.
+ */
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
new file mode 100644
index 0000000..db399db
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+/**
+ This package contains classes that support the ozone implementation on the
+ datanode side.
+
+ The main parts of ozone on the datanode are:
+
+ 1. REST Interface - This code lives under the web directory and listens on
+ the WebHDFS port.
+
+ 2. Datanode container classes: These support persistence of ozone objects
+ on the datanode. They live under the container directory.
+
+ 3. Client and Shell: We also support an ozone REST client library; it lives
+ under web/client and web/ozShell.
+
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..fa79341
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import com.google.common.collect.Sets;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .AllocateScmBlockRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .AllocateScmBlockResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteKeyBlocksResultProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmKeyBlocksRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmKeyBlocksResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .GetScmBlockLocationsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .GetScmBlockLocationsResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .ScmLocatedBlockProto;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link ScmBlockLocationProtocolPB} to the
+ * {@link ScmBlockLocationProtocol} server implementation.
+ */
+@InterfaceAudience.Private
+public final class ScmBlockLocationProtocolServerSideTranslatorPB
+    implements ScmBlockLocationProtocolPB {
+
+  private final ScmBlockLocationProtocol impl;
+
+  /**
+   * Creates a new ScmBlockLocationProtocolServerSideTranslatorPB.
+   *
+   * @param impl {@link ScmBlockLocationProtocol} server implementation
+   */
+  public ScmBlockLocationProtocolServerSideTranslatorPB(
+      ScmBlockLocationProtocol impl) throws IOException {
+    this.impl = impl;
+  }
+
+
+  @Override
+  public GetScmBlockLocationsResponseProto getScmBlockLocations(
+      RpcController controller, GetScmBlockLocationsRequestProto req)
+      throws ServiceException {
+    Set<String> keys = Sets.newLinkedHashSetWithExpectedSize(
+        req.getKeysCount());
+    for (String key : req.getKeysList()) {
+      keys.add(key);
+    }
+    final Set<AllocatedBlock> blocks;
+    try {
+      blocks = impl.getBlockLocations(keys);
+    } catch (IOException ex) {
+      throw new ServiceException(ex);
+    }
+    GetScmBlockLocationsResponseProto.Builder resp =
+        GetScmBlockLocationsResponseProto.newBuilder();
+    for (AllocatedBlock block: blocks) {
+      ScmLocatedBlockProto.Builder locatedBlock =
+          ScmLocatedBlockProto.newBuilder()
+              .setKey(block.getKey())
+              .setPipeline(block.getPipeline().getProtobufMessage());
+      resp.addLocatedBlocks(locatedBlock.build());
+    }
+    return resp.build();
+  }
+
+  @Override
+  public AllocateScmBlockResponseProto allocateScmBlock(
+      RpcController controller, AllocateScmBlockRequestProto request)
+      throws ServiceException {
+    try {
+      AllocatedBlock allocatedBlock =
+          impl.allocateBlock(request.getSize(), request.getType(),
+              request.getFactor(), request.getOwner());
+      if (allocatedBlock != null) {
+        return
+            AllocateScmBlockResponseProto.newBuilder()
+                .setKey(allocatedBlock.getKey())
+                .setPipeline(allocatedBlock.getPipeline().getProtobufMessage())
+                .setCreateContainer(allocatedBlock.getCreateContainer())
+                .setErrorCode(AllocateScmBlockResponseProto.Error.success)
+                .build();
+      } else {
+        return AllocateScmBlockResponseProto.newBuilder()
+            .setErrorCode(AllocateScmBlockResponseProto.Error.unknownFailure)
+            .build();
+      }
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks(
+      RpcController controller, DeleteScmKeyBlocksRequestProto req)
+      throws ServiceException {
+    DeleteScmKeyBlocksResponseProto.Builder resp =
+        DeleteScmKeyBlocksResponseProto.newBuilder();
+    try {
+      List<BlockGroup> infoList = req.getKeyBlocksList().stream()
+          .map(BlockGroup::getFromProto).collect(Collectors.toList());
+      final List<DeleteBlockGroupResult> results =
+          impl.deleteKeyBlocks(infoList);
+      for (DeleteBlockGroupResult result: results) {
+        DeleteKeyBlocksResultProto.Builder deleteResult =
+            DeleteKeyBlocksResultProto
+            .newBuilder()
+            .setObjectKey(result.getObjectKey())
+            .addAllBlockResults(result.getBlockResultProtoList());
+        resp.addResults(deleteResult.build());
+      }
+    } catch (IOException ex) {
+      throw new ServiceException(ex);
+    }
+    return resp.build();
+  }
+
+  @Override
+  public HddsProtos.GetScmInfoRespsonseProto getScmInfo(
+      RpcController controller, HddsProtos.GetScmInfoRequestProto req)
+      throws ServiceException {
+    ScmInfo scmInfo;
+    try {
+      scmInfo = impl.getScmInfo();
+    } catch (IOException ex) {
+      throw new ServiceException(ex);
+    }
+    return HddsProtos.GetScmInfoRespsonseProto.newBuilder()
+        .setClusterId(scmInfo.getClusterId())
+        .setScmId(scmInfo.getScmId())
+        .build();
+  }
+}
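For orientation, a hedged sketch of how a translator like this is typically wired into a Hadoop protobuf RPC server. The generated service class name (ScmBlockLocationProtocolProtos.ScmBlockLocationProtocolService), the package names in the imports, the bind address, port, and handler count are assumptions for illustration and are not part of this patch.

// Illustrative wiring only; generated protobuf service and package names are assumed.
import java.io.IOException;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.ozone.protocolPB.ScmBlockLocationProtocolServerSideTranslatorPB;

public final class ScmBlockRpcServerSketch {
  private ScmBlockRpcServerSketch() { }

  static RPC.Server start(Configuration conf, ScmBlockLocationProtocol scmImpl)
      throws IOException {
    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
        ProtobufRpcEngine.class);
    // The translator adapts protobuf requests to the server-side implementation.
    BlockingService service = ScmBlockLocationProtocolProtos
        .ScmBlockLocationProtocolService.newReflectiveBlockingService(
            new ScmBlockLocationProtocolServerSideTranslatorPB(scmImpl));
    return new RPC.Builder(conf)
        .setProtocol(ScmBlockLocationProtocolPB.class)
        .setInstance(service)
        .setBindAddress("0.0.0.0")   // placeholder address
        .setPort(9863)               // placeholder port
        .setNumHandlers(10)
        .build();
  }
}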
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..4974268
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -0,0 +1,212 @@
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link StorageContainerLocationProtocolPB} to the
+ * {@link StorageContainerLocationProtocol} server implementation.
+ */
+@InterfaceAudience.Private
+public final class StorageContainerLocationProtocolServerSideTranslatorPB
+    implements StorageContainerLocationProtocolPB {
+
+  private final StorageContainerLocationProtocol impl;
+
+  /**
+   * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB.
+   *
+   * @param impl {@link StorageContainerLocationProtocol} server implementation
+   */
+  public StorageContainerLocationProtocolServerSideTranslatorPB(
+      StorageContainerLocationProtocol impl) throws IOException {
+    this.impl = impl;
+  }
+
+  @Override
+  public ContainerResponseProto allocateContainer(RpcController unused,
+      ContainerRequestProto request) throws ServiceException {
+    try {
+      Pipeline pipeline = impl.allocateContainer(request.getReplicationType(),
+          request.getReplicationFactor(), request.getContainerName(),
+          request.getOwner());
+      return ContainerResponseProto.newBuilder()
+          .setPipeline(pipeline.getProtobufMessage())
+          .setErrorCode(ContainerResponseProto.Error.success)
+          .build();
+
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetContainerResponseProto getContainer(
+      RpcController controller, GetContainerRequestProto request)
+      throws ServiceException {
+    try {
+      Pipeline pipeline = impl.getContainer(request.getContainerName());
+      return GetContainerResponseProto.newBuilder()
+          .setPipeline(pipeline.getProtobufMessage())
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public SCMListContainerResponseProto listContainer(RpcController controller,
+      SCMListContainerRequestProto request) throws ServiceException {
+    try {
+      String startName = null;
+      String prefixName = null;
+      int count = -1;
+
+      // Arguments check.
+      if (request.hasPrefixName()) {
+        // Prefix container name is given.
+        prefixName = request.getPrefixName();
+      }
+      if (request.hasStartName()) {
+        // Start container name is given.
+        startName = request.getStartName();
+      }
+
+      count = request.getCount();
+      List<ContainerInfo> containerList =
+          impl.listContainer(startName, prefixName, count);
+      SCMListContainerResponseProto.Builder builder =
+          SCMListContainerResponseProto.newBuilder();
+      for (ContainerInfo container : containerList) {
+        builder.addContainers(container.getProtobuf());
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public SCMDeleteContainerResponseProto deleteContainer(
+      RpcController controller, SCMDeleteContainerRequestProto request)
+      throws ServiceException {
+    try {
+      impl.deleteContainer(request.getContainerName());
+      return SCMDeleteContainerResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public StorageContainerLocationProtocolProtos.NodeQueryResponseProto
+      queryNode(RpcController controller,
+      StorageContainerLocationProtocolProtos.NodeQueryRequestProto request)
+      throws ServiceException {
+    try {
+      EnumSet<HddsProtos.NodeState> nodeStateEnumSet = EnumSet.copyOf(request
+          .getQueryList());
+      HddsProtos.NodePool datanodes = impl.queryNode(nodeStateEnumSet,
+          request.getScope(), request.getPoolName());
+      return StorageContainerLocationProtocolProtos
+          .NodeQueryResponseProto.newBuilder()
+          .setDatanodes(datanodes)
+          .build();
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public ObjectStageChangeResponseProto notifyObjectStageChange(
+      RpcController controller, ObjectStageChangeRequestProto request)
+      throws ServiceException {
+    try {
+      impl.notifyObjectStageChange(request.getType(), request.getName(),
+          request.getOp(), request.getStage());
+      return ObjectStageChangeResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public PipelineResponseProto allocatePipeline(
+      RpcController controller, PipelineRequestProto request)
+      throws ServiceException {
+    // TODO : Wiring this up requires one more patch.
+    return null;
+  }
+
+  @Override
+  public HddsProtos.GetScmInfoRespsonseProto getScmInfo(
+      RpcController controller, HddsProtos.GetScmInfoRequestProto req)
+      throws ServiceException {
+    try {
+      ScmInfo scmInfo = impl.getScmInfo();
+      return HddsProtos.GetScmInfoRespsonseProto.newBuilder()
+          .setClusterId(scmInfo.getClusterId())
+          .setScmId(scmInfo.getScmId())
+          .build();
+    } catch (IOException ex) {
+      throw new ServiceException(ex);
+    }
+
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
new file mode 100644
index 0000000..860386d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.protocolPB;
+
+/**
+ * This package contains classes for the Protocol Buffers binding of Ozone
+ * protocols.
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
new file mode 100644
index 0000000..af56da3
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.utils;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.databind.type.CollectionType;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * JSON Utility functions used in ozone.
+ */
+public final class JsonUtils {
+
+  // Reuse the ObjectMapper instance to improve performance.
+  // ObjectMapper is thread-safe as long as the instance is always configured
+  // before use.
+  private static final ObjectMapper MAPPER = new ObjectMapper();
+  private static final ObjectReader READER = MAPPER.readerFor(Object.class);
+  private static final ObjectWriter WRITER =
+      MAPPER.writerWithDefaultPrettyPrinter();
+
+  private JsonUtils() {
+    // Never constructed
+  }
+
+  public static String toJsonStringWithDefaultPrettyPrinter(String jsonString)
+      throws IOException {
+    Object json = READER.readValue(jsonString);
+    return WRITER.writeValueAsString(json);
+  }
+
+  public static String toJsonString(Object obj) throws IOException {
+    return MAPPER.writeValueAsString(obj);
+  }
+
+  /**
+   * Deserializes a list of elements from a given JSON string;
+   * each element in the list is of the given type.
+   *
+   * @param str json string.
+   * @param elementType element type.
+   * @return List of elements of type elementType
+   * @throws IOException
+   */
+  public static List<?> toJsonList(String str, Class<?> elementType)
+      throws IOException {
+    CollectionType type = MAPPER.getTypeFactory()
+        .constructCollectionType(List.class, elementType);
+    return MAPPER.readValue(str, type);
+  }
+}
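A small usage sketch of the JsonUtils helpers shown above; the values serialized here are placeholders.

// Minimal illustration of JsonUtils usage; the serialized data is arbitrary.
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.ozone.web.utils.JsonUtils;

public final class JsonUtilsExample {
  private JsonUtilsExample() { }

  public static void main(String[] args) throws IOException {
    // Serialize an arbitrary object to JSON.
    String json = JsonUtils.toJsonString(Arrays.asList("vol1", "vol2"));

    // Pretty-print an existing JSON string.
    String pretty = JsonUtils.toJsonStringWithDefaultPrettyPrinter(json);
    System.out.println(pretty);

    // Deserialize a JSON array back into a typed list.
    List<?> names = JsonUtils.toJsonList(json, String.class);
    System.out.println(names);
  }
}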
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
new file mode 100644
index 0000000..e5812c0
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.utils;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
new file mode 100644
index 0000000..431da64
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * An abstract class for a background service in ozone.
+ * A background service schedules multiple child tasks to run in parallel
+ * at a fixed interval. In each interval, it waits until all the tasks
+ * finish execution and then schedules the next interval.
+ */
+public abstract class BackgroundService {
+
+  @VisibleForTesting
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BackgroundService.class);
+
+  // Executor to launch child tasks
+  private final ScheduledExecutorService exec;
+  private final ThreadGroup threadGroup;
+  private final ThreadFactory threadFactory;
+  private final String serviceName;
+  private final long interval;
+  private final long serviceTimeout;
+  private final TimeUnit unit;
+  private final PeriodicalTask service;
+
+  public BackgroundService(String serviceName, long interval,
+      TimeUnit unit, int threadPoolSize, long serviceTimeout) {
+    this.interval = interval;
+    this.unit = unit;
+    this.serviceName = serviceName;
+    this.serviceTimeout = serviceTimeout;
+    threadGroup = new ThreadGroup(serviceName);
+    ThreadFactory tf = r -> new Thread(threadGroup, r);
+    threadFactory = new ThreadFactoryBuilder()
+        .setThreadFactory(tf)
+        .setDaemon(true)
+        .setNameFormat(serviceName + "#%d")
+        .build();
+    exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory);
+    service = new PeriodicalTask();
+  }
+
+  protected ExecutorService getExecutorService() {
+    return this.exec;
+  }
+
+  @VisibleForTesting
+  public int getThreadCount() {
+    return threadGroup.activeCount();
+  }
+
+  @VisibleForTesting
+  public void triggerBackgroundTaskForTesting() {
+    service.run();
+  }
+
+  // start service
+  public void start() {
+    exec.scheduleWithFixedDelay(service, 0, interval, unit);
+  }
+
+  public abstract BackgroundTaskQueue getTasks();
+
+  /**
+   * Runs one or more background tasks concurrently.
+   * Waits until all tasks have returned their results.
+   */
+  public class PeriodicalTask implements Runnable {
+    @Override
+    public synchronized void run() {
+      LOG.debug("Running background service : {}", serviceName);
+      BackgroundTaskQueue tasks = getTasks();
+      if (tasks.isEmpty()) {
+        // No tasks were found, or there was a problem initializing them;
+        // return and retry in the next interval.
+        return;
+      }
+
+      LOG.debug("Number of background tasks to execute : {}", tasks.size());
+      CompletionService<BackgroundTaskResult> taskCompletionService =
+          new ExecutorCompletionService<>(exec);
+
+      List<Future<BackgroundTaskResult>> results = Lists.newArrayList();
+      while (tasks.size() > 0) {
+        BackgroundTask task = tasks.poll();
+        Future<BackgroundTaskResult> result =
+            taskCompletionService.submit(task);
+        results.add(result);
+      }
+
+      results.parallelStream().forEach(taskResultFuture -> {
+        try {
+          // Collect task results
+          BackgroundTaskResult result = serviceTimeout > 0
+              ? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS)
+              : taskResultFuture.get();
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("task execution result size {}", result.getSize());
+          }
+        } catch (InterruptedException | ExecutionException e) {
+          LOG.warn(
+              "Background task failed to execute, "
+                  + "retrying in next interval", e);
+        } catch (TimeoutException e) {
+          LOG.warn("Background task execution timed out, "
+              + "retrying in next interval", e);
+        }
+      });
+    }
+  }
+
+  // shutdown and make sure all threads are properly released.
+  public void shutdown() {
+    LOG.info("Shutting down service {}", this.serviceName);
+    exec.shutdown();
+    try {
+      if (!exec.awaitTermination(60, TimeUnit.SECONDS)) {
+        exec.shutdownNow();
+      }
+    } catch (InterruptedException e) {
+      exec.shutdownNow();
+    }
+    if (threadGroup.activeCount() == 0 && !threadGroup.isDestroyed()) {
+      threadGroup.destroy();
+    }
+  }
+}
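A minimal sketch of a concrete BackgroundService; the SampleCleanupService name and the no-op task below are hypothetical and only illustrate the contract (a constructor that sets the schedule plus a getTasks() implementation).

// Hypothetical subclass that runs one no-op task per interval.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.utils.BackgroundService;
import org.apache.hadoop.utils.BackgroundTask;
import org.apache.hadoop.utils.BackgroundTaskQueue;
import org.apache.hadoop.utils.BackgroundTaskResult;
import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult;

public class SampleCleanupService extends BackgroundService {

  public SampleCleanupService() {
    // service name, interval, interval unit, thread pool size,
    // per-interval timeout in milliseconds
    super("SampleCleanupService", 60, TimeUnit.SECONDS, 2, 30_000);
  }

  @Override
  public BackgroundTaskQueue getTasks() {
    BackgroundTaskQueue queue = new BackgroundTaskQueue();
    queue.add(new BackgroundTask<BackgroundTaskResult>() {
      @Override
      public int getPriority() {
        return 0;
      }

      @Override
      public BackgroundTaskResult call() {
        // A real service would do its cleanup work here.
        return EmptyTaskResult.newResult();
      }
    });
    return queue;
  }
}

Callers would typically invoke start() once at service startup and shutdown() when the owning component closes.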
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
new file mode 100644
index 0000000..47e8ebc
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import java.util.concurrent.Callable;
+
+/**
+ * A task to be run by {@link BackgroundService}.
+ */
+public interface BackgroundTask<T> extends Callable<T> {
+
+  int getPriority();
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
new file mode 100644
index 0000000..b56ef0c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import java.util.PriorityQueue;
+
+/**
+ * A priority queue that stores {@link BackgroundTask} instances, ordered by
+ * priority.
+ */
+public class BackgroundTaskQueue {
+
+  private final PriorityQueue<BackgroundTask> tasks;
+
+  public BackgroundTaskQueue() {
+    tasks = new PriorityQueue<>((task1, task2)
+        -> Integer.compare(task1.getPriority(), task2.getPriority()));
+  }
+
+  /**
+   * @return the head task in this queue.
+   */
+  public synchronized BackgroundTask poll() {
+    return tasks.poll();
+  }
+
+  /**
+   * Adds a {@link BackgroundTask} to the queue;
+   * tasks are ordered by their priority.
+   *
+   * @param task the task to add.
+   */
+  public synchronized void add(BackgroundTask task) {
+    tasks.add(task);
+  }
+
+  /**
+   * @return true if the queue contains no task, false otherwise.
+   */
+  public synchronized boolean isEmpty() {
+    return tasks.isEmpty();
+  }
+
+  /**
+   * @return the size of the queue.
+   */
+  public synchronized int size() {
+    return tasks.size();
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
new file mode 100644
index 0000000..198300f
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.utils;
+
+/**
+ * Result of a {@link BackgroundTask}.
+ */
+public interface BackgroundTaskResult {
+
+  /**
+   * Returns the size of entries included in this result.
+   */
+  int getSize();
+
+  /**
+   * An empty task result implementation.
+   */
+  class EmptyTaskResult implements BackgroundTaskResult {
+
+    public static EmptyTaskResult newResult() {
+      return new EmptyTaskResult();
+    }
+
+    @Override
+    public int getSize() {
+      return 0;
+    }
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java
new file mode 100644
index 0000000..47699eb
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * A utility class to store a batch of DB write operations.
+ */
+public class BatchOperation {
+
+  /**
+   * Enum for write operations.
+   */
+  public enum Operation {
+    DELETE, PUT
+  }
+
+  private List<SingleOperation> operations =
+      Lists.newArrayList();
+
+  /**
+   * Add a PUT operation into the batch.
+   */
+  public void put(byte[] key, byte[] value) {
+    operations.add(new SingleOperation(Operation.PUT, key, value));
+  }
+
+  /**
+   * Add a DELETE operation into the batch.
+   */
+  public void delete(byte[] key) {
+    operations.add(new SingleOperation(Operation.DELETE, key, null));
+
+  }
+
+  public List<SingleOperation> getOperations() {
+    return operations;
+  }
+
+  /**
+   * A SingleOperation represents a PUT or DELETE operation
+   * and the data the operation needs to manipulate.
+   */
+  public static class SingleOperation {
+
+    private Operation opt;
+    private byte[] key;
+    private byte[] value;
+
+    public SingleOperation(Operation opt, byte[] key, byte[] value) {
+      this.opt = opt;
+      if (key == null) {
+        throw new IllegalArgumentException("key cannot be null");
+      }
+      this.key = key.clone();
+      this.value = value == null ? null : value.clone();
+    }
+
+    public Operation getOpt() {
+      return opt;
+    }
+
+    public byte[] getKey() {
+      return key.clone();
+    }
+
+    public byte[] getValue() {
+      return value == null ? null : value.clone();
+    }
+  }
+}
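A short sketch of batching writes against a MetadataStore (defined later in this patch); the keys and values are placeholders, and `store` is assumed to be an already-built MetadataStore.

// Queue a PUT and a DELETE, then apply both as one atomic write.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.utils.BatchOperation;
import org.apache.hadoop.utils.MetadataStore;

public final class BatchWriteExample {
  private BatchWriteExample() { }

  public static void applyBatch(MetadataStore store) throws IOException {
    BatchOperation batch = new BatchOperation();
    // Nothing touches the store until writeBatch() is called.
    batch.put("key1".getBytes(StandardCharsets.UTF_8),
        "value1".getBytes(StandardCharsets.UTF_8));
    batch.delete("staleKey".getBytes(StandardCharsets.UTF_8));
    // Apply both operations as a single atomic write.
    store.writeBatch(batch);
  }
}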
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
new file mode 100644
index 0000000..c407398
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import java.io.IOException;
+
+/**
+ * A consumer for metadata store key-value entries.
+ * Used by the {@link MetadataStore} class.
+ */
+@FunctionalInterface
+public interface EntryConsumer {
+
+  /**
+   * Consumes a key and value and produces a boolean result.
+   * @param key key
+   * @param value value
+   * @return a boolean value produced by the consumer
+   * @throws IOException
+   */
+  boolean consume(byte[] key, byte[] value) throws IOException;
+}
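Because EntryConsumer is a functional interface, the iterate() method of MetadataStore (defined later in this patch) can take a lambda. A minimal sketch, assuming an already-built store:

// Print every key, starting from the first entry (from == null), while the
// consumer keeps returning true.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.utils.MetadataStore;

public final class IterateExample {
  private IterateExample() { }

  public static void printAll(MetadataStore store) throws IOException {
    store.iterate(null, (key, value) -> {
      System.out.println(new String(key, StandardCharsets.UTF_8));
      return true;
    });
  }
}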
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
new file mode 100644
index 0000000..83ca83d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.fusesource.leveldbjni.JniDBFactory;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBIterator;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.ReadOptions;
+import org.iq80.leveldb.Snapshot;
+import org.iq80.leveldb.WriteBatch;
+import org.iq80.leveldb.WriteOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * LevelDB implementation of {@link MetadataStore}.
+ */
+public class LevelDBStore implements MetadataStore {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(LevelDBStore.class);
+
+  private DB db;
+  private final File dbFile;
+  private final Options dbOptions;
+  private final WriteOptions writeOptions;
+
+  public LevelDBStore(File dbPath, boolean createIfMissing)
+      throws IOException {
+    dbOptions = new Options();
+    dbOptions.createIfMissing(createIfMissing);
+    this.dbFile = dbPath;
+    this.writeOptions = new WriteOptions().sync(true);
+    openDB(dbPath, dbOptions);
+  }
+
+  /**
+   * Opens a DB file.
+   *
+   * @param dbPath  - DB file path
+   * @param options - DB options
+   * @throws IOException
+   */
+  public LevelDBStore(File dbPath, Options options)
+      throws IOException {
+    dbOptions = options;
+    this.dbFile = dbPath;
+    this.writeOptions = new WriteOptions().sync(true);
+    openDB(dbPath, dbOptions);
+  }
+
+  private void openDB(File dbPath, Options options) throws IOException {
+    dbPath.getParentFile().mkdirs();
+    db = JniDBFactory.factory.open(dbPath, options);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("LevelDB successfully opened");
+      LOG.debug("[Option] cacheSize = " + options.cacheSize());
+      LOG.debug("[Option] createIfMissing = " + options.createIfMissing());
+      LOG.debug("[Option] blockSize = " + options.blockSize());
+      LOG.debug("[Option] compressionType= " + options.compressionType());
+      LOG.debug("[Option] maxOpenFiles= " + options.maxOpenFiles());
+      LOG.debug("[Option] writeBufferSize= "+ options.writeBufferSize());
+    }
+  }
+
+  /**
+   * Puts a Key into file.
+   *
+   * @param key   - key
+   * @param value - value
+   */
+  @Override
+  public void put(byte[] key, byte[] value) {
+    db.put(key, value, writeOptions);
+  }
+
+  /**
+   * Get Key.
+   *
+   * @param key key
+   * @return value
+   */
+  @Override
+  public byte[] get(byte[] key) {
+    return db.get(key);
+  }
+
+  /**
+   * Delete Key.
+   *
+   * @param key - Key
+   */
+  @Override
+  public void delete(byte[] key) {
+    db.delete(key);
+  }
+
+  /**
+   * Closes the DB.
+   *
+   * @throws IOException
+   */
+  @Override
+  public void close() throws IOException {
+    if (db != null){
+      db.close();
+    }
+  }
+
+  /**
+   * Returns true if the DB is empty.
+   *
+   * @return boolean
+   * @throws IOException
+   */
+  @Override
+  public boolean isEmpty() throws IOException {
+    try (DBIterator iter = db.iterator()) {
+      iter.seekToFirst();
+      return !iter.hasNext();
+    }
+  }
+
+  /**
+   * Returns the actual levelDB object.
+   * @return DB handle.
+   */
+  public DB getDB() {
+    return db;
+  }
+
+  /**
+   * Returns an iterator on all the key-value pairs in the DB.
+   * @return an iterator on DB entries.
+   */
+  public DBIterator getIterator() {
+    return db.iterator();
+  }
+
+
+  @Override
+  public void destroy() throws IOException {
+    close();
+    JniDBFactory.factory.destroy(dbFile, dbOptions);
+  }
+
+  @Override
+  public ImmutablePair<byte[], byte[]> peekAround(int offset,
+      byte[] from) throws IOException, IllegalArgumentException {
+    try (DBIterator it = db.iterator()) {
+      if (from == null) {
+        it.seekToFirst();
+      } else {
+        it.seek(from);
+      }
+      if (!it.hasNext()) {
+        return null;
+      }
+      switch (offset) {
+      case 0:
+        Entry<byte[], byte[]> current = it.next();
+        return new ImmutablePair<>(current.getKey(), current.getValue());
+      case 1:
+        if (it.next() != null && it.hasNext()) {
+          Entry<byte[], byte[]> next = it.peekNext();
+          return new ImmutablePair<>(next.getKey(), next.getValue());
+        }
+        break;
+      case -1:
+        if (it.hasPrev()) {
+          Entry<byte[], byte[]> prev = it.peekPrev();
+          return new ImmutablePair<>(prev.getKey(), prev.getValue());
+        }
+        break;
+      default:
+        throw new IllegalArgumentException(
+            "Position can only be -1, 0 " + "or 1, but found " + offset);
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public void iterate(byte[] from, EntryConsumer consumer)
+      throws IOException {
+    try (DBIterator iter = db.iterator()) {
+      if (from != null) {
+        iter.seek(from);
+      } else {
+        iter.seekToFirst();
+      }
+      while (iter.hasNext()) {
+        Entry<byte[], byte[]> current = iter.next();
+        if (!consumer.consume(current.getKey(),
+            current.getValue())) {
+          break;
+        }
+      }
+    }
+  }
+
+  /**
+   * Compacts the DB by removing deleted keys etc.
+   * @throws IOException if there is an error.
+   */
+  @Override
+  public void compactDB() throws IOException {
+    if(db != null) {
+      // From LevelDB docs : begin == null and end == null means the whole DB.
+      db.compactRange(null, null);
+    }
+  }
+
+  @Override
+  public void writeBatch(BatchOperation operation) throws IOException {
+    List<BatchOperation.SingleOperation> operations =
+        operation.getOperations();
+    if (!operations.isEmpty()) {
+      try (WriteBatch writeBatch = db.createWriteBatch()) {
+        for (BatchOperation.SingleOperation opt : operations) {
+          switch (opt.getOpt()) {
+          case DELETE:
+            writeBatch.delete(opt.getKey());
+            break;
+          case PUT:
+            writeBatch.put(opt.getKey(), opt.getValue());
+            break;
+          default:
+            throw new IllegalArgumentException("Invalid operation "
+                + opt.getOpt());
+          }
+        }
+        db.write(writeBatch);
+      }
+    }
+  }
+
+  @Override
+  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
+      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException {
+    return getRangeKVs(startKey, count, false, filters);
+  }
+
+  @Override
+  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
+      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException {
+    return getRangeKVs(startKey, count, true, filters);
+  }
+
+  /**
+   * Returns a certain range of key-value pairs as a list, based on a
+   * startKey and count. Additionally, one or more {@link MetadataKeyFilter}
+   * can be given to filter keys if necessary. To prevent race conditions
+   * while listing entries, this implementation takes a snapshot and lists
+   * the entries from the snapshot. As a consequence, the returned range may
+   * differ slightly from the actual data if it is being updated concurrently.
+   * <p>
+   * If the startKey is specified and found in levelDB, this key and the keys
+   * after it will be included in the result. If the startKey is null, all
+   * entries will be included as long as other conditions are satisfied.
+   * If the given startKey doesn't exist, an empty list will be returned.
+   * <p>
+   * The count argument limits the number of entries to return; the value
+   * for count must not be negative.
+   * <p>
+   * When one or more {@link MetadataKeyFilter} are specified, only the
+   * entries whose key passes all the filters will be included in the result.
+   *
+   * @param startKey a start key.
+   * @param count max number of entries to return.
+   * @param filters customized one or more {@link MetadataKeyFilter}.
+   * @return a list of entries found in the database or an empty list if the
+   * startKey is invalid.
+   * @throws IOException if there are I/O errors.
+   * @throws IllegalArgumentException if count is less than 0.
+   */
+  private List<Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
+      int count, boolean sequential, MetadataKeyFilter... filters)
+      throws IOException {
+    List<Entry<byte[], byte[]>> result = new ArrayList<>();
+    long start = System.currentTimeMillis();
+    if (count < 0) {
+      throw new IllegalArgumentException(
+          "Invalid count given " + count + ", count must be greater than 0");
+    }
+    Snapshot snapShot = null;
+    DBIterator dbIter = null;
+    try {
+      snapShot = db.getSnapshot();
+      ReadOptions readOptions = new ReadOptions().snapshot(snapShot);
+      dbIter = db.iterator(readOptions);
+      if (startKey == null) {
+        dbIter.seekToFirst();
+      } else {
+        if (db.get(startKey) == null) {
+          // Key not found, return empty list
+          return result;
+        }
+        dbIter.seek(startKey);
+      }
+      while (dbIter.hasNext() && result.size() < count) {
+        byte[] preKey = dbIter.hasPrev() ? dbIter.peekPrev().getKey() : null;
+        byte[] nextKey = dbIter.hasNext() ? dbIter.peekNext().getKey() : null;
+        Entry<byte[], byte[]> current = dbIter.next();
+
+        if (filters == null) {
+          result.add(current);
+        } else {
+          if (Arrays.stream(filters).allMatch(
+              entry -> entry.filterKey(preKey, current.getKey(), nextKey))) {
+            result.add(current);
+          } else {
+            if (result.size() > 0 && sequential) {
+            // If the caller asks for a sequential range of results and we
+            // hit a mismatch, abort the iteration here. If the result is
+            // still empty, we keep looking for the first match.
+              break;
+            }
+          }
+        }
+      }
+    } finally {
+      if (snapShot != null) {
+        snapShot.close();
+      }
+      if (dbIter != null) {
+        dbIter.close();
+      }
+      if (LOG.isDebugEnabled()) {
+        if (filters != null) {
+          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
+            int scanned = filter.getKeysScannedNum();
+            int hinted = filter.getKeysHintedNum();
+            if (scanned > 0 || hinted > 0) {
+              LOG.debug(
+                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
+                  filter.getClass().getSimpleName(), filter.getKeysScannedNum(),
+                  filter.getKeysHintedNum());
+            }
+          }
+        }
+        long end = System.currentTimeMillis();
+        long timeConsumed = end - start;
+        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
+            + " result length is {}.", timeConsumed, result.size());
+      }
+    }
+    return result;
+  }
+}
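A hedged sketch of opening a LevelDBStore directly and listing a key range; the database path, keys, and count below are illustrative only.

// Open (or create) a LevelDB-backed store, write one entry, list a range.
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.utils.LevelDBStore;

public final class LevelDBStoreExample {
  private LevelDBStoreExample() { }

  public static void main(String[] args) throws IOException {
    LevelDBStore store = new LevelDBStore(new File("/tmp/example-db"), true);
    try {
      store.put("a1".getBytes(StandardCharsets.UTF_8),
          "v1".getBytes(StandardCharsets.UTF_8));
      // List at most 10 entries starting from the first key (null startKey).
      List<Map.Entry<byte[], byte[]>> range = store.getRangeKVs(null, 10);
      System.out.println("entries found: " + range.size());
    } finally {
      store.close();
    }
  }
}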
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
new file mode 100644
index 0000000..3ff0a94
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.utils;
+
+import com.google.common.base.Strings;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+/**
+ * A utility class to filter levelDB keys.
+ */
+public final class MetadataKeyFilters {
+
+  private static KeyPrefixFilter deletingKeyFilter =
+      new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX);
+
+  private static KeyPrefixFilter normalKeyFilter =
+      new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX,
+          true);
+
+  private MetadataKeyFilters() {
+  }
+
+  public static KeyPrefixFilter getDeletingKeyFilter() {
+    return deletingKeyFilter;
+  }
+
+  public static KeyPrefixFilter getNormalKeyFilter() {
+    return normalKeyFilter;
+  }
+  /**
+   * Interface for levelDB key filters.
+   */
+  public interface MetadataKeyFilter {
+    /**
+     * Filters a levelDB key against a certain condition.
+     *
+     * @param preKey     previous key.
+     * @param currentKey current key.
+     * @param nextKey    next key.
+     * @return true if the condition is satisfied, false otherwise.
+     */
+    boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey);
+
+    default int getKeysScannedNum() {
+      return 0;
+    }
+
+    default int getKeysHintedNum() {
+      return 0;
+    }
+  }
+
+  /**
+   * Utility class to filter keys by a string prefix. This filter
+   * assumes keys can be parsed as strings.
+   */
+  public static class KeyPrefixFilter implements MetadataKeyFilter {
+
+    private String keyPrefix = null;
+    private int keysScanned = 0;
+    private int keysHinted = 0;
+    private boolean negative;
+
+    public KeyPrefixFilter(String keyPrefix) {
+      this(keyPrefix, false);
+    }
+
+    public KeyPrefixFilter(String keyPrefix, boolean negative) {
+      this.keyPrefix = keyPrefix;
+      this.negative = negative;
+    }
+
+    @Override
+    public boolean filterKey(byte[] preKey, byte[] currentKey,
+        byte[] nextKey) {
+      keysScanned++;
+      boolean accept = false;
+      if (Strings.isNullOrEmpty(keyPrefix)) {
+        accept = true;
+      } else {
+        if (currentKey != null &&
+            DFSUtil.bytes2String(currentKey).startsWith(keyPrefix)) {
+          keysHinted++;
+          accept = true;
+        } else {
+          accept = false;
+        }
+      }
+      return (negative) ? !accept : accept;
+    }
+
+    @Override
+    public int getKeysScannedNum() {
+      return keysScanned;
+    }
+
+    @Override
+    public int getKeysHintedNum() {
+      return keysHinted;
+    }
+  }
+}
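A brief sketch of filtering a range query with the shared deleting-key filter; `store` is assumed to be an already-built MetadataStore, and the count is a placeholder.

// List up to 100 entries whose keys carry the deleting prefix from OzoneConsts.
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.utils.MetadataStore;

public final class KeyFilterExample {
  private KeyFilterExample() { }

  public static List<Map.Entry<byte[], byte[]>> listDeleting(MetadataStore store)
      throws IOException {
    // Shared filter instance that accepts only keys marked for deletion.
    KeyPrefixFilter deletingFilter = MetadataKeyFilters.getDeletingKeyFilter();
    return store.getRangeKVs(null, 100, deletingFilter);
  }
}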
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java
new file mode 100644
index 0000000..b90b08f
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Interface for a key-value store that stores ozone metadata.
+ * Ozone metadata is stored as key-value pairs; both keys and values
+ * are arbitrary byte arrays.
+ */
+@InterfaceStability.Evolving
+public interface MetadataStore extends Closeable {
+
+  /**
+   * Puts a key-value pair into the store.
+   *
+   * @param key metadata key
+   * @param value metadata value
+   */
+  void put(byte[] key, byte[] value) throws IOException;
+
+  /**
+   * @return true if the metadata store is empty.
+   *
+   * @throws IOException
+   */
+  boolean isEmpty() throws IOException;
+
+  /**
+   * Returns the value mapped to the given key in byte array.
+   *
+   * @param key metadata key
+   * @return value in byte array
+   * @throws IOException
+   */
+  byte[] get(byte[] key) throws IOException;
+
+  /**
+   * Deletes a key from the metadata store.
+   *
+   * @param key metadata key
+   * @throws IOException
+   */
+  void delete(byte[] key) throws IOException;
+
+  /**
+   * Returns a certain range of key-value pairs as a list, based on a
+   * startKey and count. Additionally, one or more {@link MetadataKeyFilter}
+   * can be given to filter keys if necessary. To prevent race conditions
+   * while listing entries, this implementation takes a snapshot and lists
+   * the entries from the snapshot. As a consequence, the returned range may
+   * differ slightly from the actual data if it is being updated concurrently.
+   * <p>
+   * If the startKey is specified and found in levelDB, this key and the keys
+   * after it will be included in the result. If the startKey is null, all
+   * entries will be included as long as other conditions are satisfied.
+   * If the given startKey doesn't exist, an empty list will be returned.
+   * <p>
+   * The count argument limits the number of entries to return; the value
+   * for count must not be negative.
+   * <p>
+   * When one or more {@link MetadataKeyFilter} are specified, only the
+   * entries whose key passes all the filters will be included in the result.
+   *
+   * @param startKey a start key.
+   * @param count max number of entries to return.
+   * @param filters customized one or more {@link MetadataKeyFilter}.
+   * @return a list of entries found in the database or an empty list if the
+   * startKey is invalid.
+   * @throws IOException if there are I/O errors.
+   * @throws IllegalArgumentException if count is less than 0.
+   */
+  List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
+      int count, MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException;
+
+  /**
+   * This method is very similar to {@link #getRangeKVs}; the only
+   * difference is that this method returns a sequential range of
+   * elements based on the filters. While iterating the elements, if an
+   * entry does not pass the filter, the iteration stops at that point
+   * without looking for the next match. If no filter is given,
+   * this method behaves just like {@link #getRangeKVs}.
+   *
+   * @param startKey a start key.
+   * @param count max number of entries to return.
+   * @param filters customized one or more {@link MetadataKeyFilter}.
+   * @return a list of entries found in the database.
+   * @throws IOException
+   * @throws IllegalArgumentException
+   */
+  List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
+      int count, MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException;
+
+  /**
+   * A batch of PUT, DELETE operations handled as a single atomic write.
+   *
+   * @throws IOException write fails
+   */
+  void writeBatch(BatchOperation operation) throws IOException;
+
+  /**
+   * Compact the entire database.
+   * @throws IOException
+   */
+  void compactDB() throws IOException;
+
+  /**
+   * Destroys the content of the specified database;
+   * a destroyed database cannot be loaded again.
+   * Be very careful with this method.
+   *
+   * @throws IOException if I/O error happens
+   */
+  void destroy() throws IOException;
+
+  /**
+   * Seeks the database to a certain key and returns the key-value
+   * pair around this key based on the given offset. Note that this method
+   * only supports offsets -1 (left), 0 (current) and 1 (right);
+   * any other offset will cause an {@link IllegalArgumentException}.
+   *
+   * @param offset offset to the key
+   * @param from from which key
+   * @return a key-value pair
+   * @throws IOException
+   */
+  ImmutablePair<byte[], byte[]> peekAround(int offset, byte[] from)
+      throws IOException, IllegalArgumentException;
+
+  /**
+   * Iterates entries in the database from a certain key.
+   * Applies the given {@link EntryConsumer} to the key and value of
+   * each entry; the consumer produces a boolean result which is used
+   * as the criterion to exit the iteration.
+   *
+   * @param from the start key
+   * @param consumer
+   *   a {@link EntryConsumer} applied to each key and value. If the consumer
+   *   returns true, continues the iteration to next entry; otherwise exits
+   *   the iteration.
+   * @throws IOException
+   */
+  void iterate(byte[] from, EntryConsumer consumer)
+      throws IOException;
+}
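A short sketch of the peekAround contract described above, assuming an already-built MetadataStore named store; the key passed in is arbitrary.

// Offset 0 returns the entry at (or after) the key itself; -1 the previous
// entry, +1 the next one. A null result means there is no such neighbor.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.hadoop.utils.MetadataStore;

public final class PeekAroundExample {
  private PeekAroundExample() { }

  public static void printNeighbors(MetadataStore store, byte[] key)
      throws IOException {
    ImmutablePair<byte[], byte[]> current = store.peekAround(0, key);
    ImmutablePair<byte[], byte[]> next = store.peekAround(1, key);
    if (current != null) {
      System.out.println("current: "
          + new String(current.getKey(), StandardCharsets.UTF_8));
    }
    if (next != null) {
      System.out.println("next: "
          + new String(next.getKey(), StandardCharsets.UTF_8));
    }
  }
}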
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
new file mode 100644
index 0000000..9e9c32a
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.iq80.leveldb.Options;
+import org.rocksdb.BlockBasedTableConfig;
+import org.rocksdb.Statistics;
+import org.rocksdb.StatsLevel;
+
+import java.io.File;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_IMPL_LEVELDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
+
+/**
+ * Builder for metadata store.
+ */
+public class MetadataStoreBuilder {
+
+  private File dbFile;
+  private long cacheSize;
+  private boolean createIfMissing = true;
+  private Configuration conf;
+
+  public static MetadataStoreBuilder newBuilder() {
+    return new MetadataStoreBuilder();
+  }
+
+  public MetadataStoreBuilder setDbFile(File dbPath) {
+    this.dbFile = dbPath;
+    return this;
+  }
+
+  public MetadataStoreBuilder setCacheSize(long cache) {
+    this.cacheSize = cache;
+    return this;
+  }
+
+  public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) {
+    this.createIfMissing = doCreate;
+    return this;
+  }
+
+  public MetadataStoreBuilder setConf(Configuration configuration) {
+    this.conf = configuration;
+    return this;
+  }
+
+  public MetadataStore build() throws IOException {
+    if (dbFile == null) {
+      throw new IllegalArgumentException("Failed to build metadata store, "
+          + "dbFile is required but not found");
+    }
+
+    // Build db store based on configuration
+    MetadataStore store = null;
+    String impl = conf == null ?
+        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
+        conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
+            OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
+    if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(impl)) {
+      Options options = new Options();
+      options.createIfMissing(createIfMissing);
+      if (cacheSize > 0) {
+        options.cacheSize(cacheSize);
+      }
+      store = new LevelDBStore(dbFile, options);
+    } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(impl)) {
+      org.rocksdb.Options opts = new org.rocksdb.Options();
+      opts.setCreateIfMissing(createIfMissing);
+
+      if (cacheSize > 0) {
+        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
+        tableConfig.setBlockCacheSize(cacheSize);
+        opts.setTableFormatConfig(tableConfig);
+      }
+
+      String rocksDbStat = conf == null ?
+          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT :
+          conf.getTrimmed(OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
+              OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
+
+      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
+        Statistics statistics = new Statistics();
+        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+        opts = opts.setStatistics(statistics);
+
+      }
+      store = new RocksDBStore(dbFile, opts);
+    } else {
+      throw new IllegalArgumentException("Invalid argument for "
+          + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
+          + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
+          + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
+          + ", but met " + impl);
+    }
+    return store;
+  }
+}
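
A minimal usage sketch of the builder above (illustrative only, not applied by this patch): the db path and cache size are placeholders, and the configuration constants are the same OzoneConfigKeys fields the builder itself imports.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;

public class MetadataStoreExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Select the RocksDB implementation; LevelDB is the other supported value.
    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB);
    MetadataStore store = MetadataStoreBuilder.newBuilder()
        .setConf(conf)
        .setDbFile(new File("/tmp/ozone-meta.db"))   // placeholder path
        .setCacheSize(64L << 20)                     // 64 MB block cache
        .build();
    store.put("key".getBytes(), "value".getBytes());
    store.close();
  }
}
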
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
new file mode 100644
index 0000000..a60e98d
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
+import org.rocksdb.DbPath;
+import org.rocksdb.Options;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.RocksIterator;
+import org.rocksdb.WriteBatch;
+import org.rocksdb.WriteOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.io.File;
+import java.io.IOException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * RocksDB implementation of ozone metadata store.
+ */
+public class RocksDBStore implements MetadataStore {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RocksDBStore.class);
+
+  private RocksDB db = null;
+  private File dbLocation;
+  private WriteOptions writeOptions;
+  private Options dbOptions;
+  private ObjectName statMBeanName;
+
+  public RocksDBStore(File dbFile, Options options)
+      throws IOException {
+    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
+    RocksDB.loadLibrary();
+    dbOptions = options;
+    dbLocation = dbFile;
+    writeOptions = new WriteOptions();
+    try {
+
+      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
+      if (dbOptions.statistics() != null) {
+
+        Map<String, String> jmxProperties = new HashMap<String, String>();
+        jmxProperties.put("dbName", dbFile.getName());
+        statMBeanName = MBeans.register("Ozone", "RocksDbStore", jmxProperties,
+            new RocksDBStoreMBean(dbOptions.statistics()));
+        if (statMBeanName == null) {
+          LOG.warn("jmx registration failed during RocksDB init, db path :{}",
+              dbFile.getAbsolutePath());
+        }
+      }
+    } catch (RocksDBException e) {
+      throw new IOException(
+          "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e);
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("RocksDB successfully opened.");
+      LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
+      LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
+      LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
+      LOG.debug("[Option] compressionType= {}", options.compressionType());
+      LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
+      LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
+    }
+  }
+
+  private IOException toIOException(String msg, RocksDBException e) {
+    String statusCode = e.getStatus() == null ? "N/A" :
+        e.getStatus().getCodeString();
+    String errMessage = e.getMessage() == null ? "Unknown error" :
+        e.getMessage();
+    String output = msg + "; status : " + statusCode
+        + "; message : " + errMessage;
+    return new IOException(output, e);
+  }
+
+  @Override
+  public void put(byte[] key, byte[] value) throws IOException {
+    try {
+      db.put(writeOptions, key, value);
+    } catch (RocksDBException e) {
+      throw toIOException("Failed to put key-value to metadata store", e);
+    }
+  }
+
+  @Override
+  public boolean isEmpty() throws IOException {
+    RocksIterator it = null;
+    try {
+      it = db.newIterator();
+      it.seekToFirst();
+      return !it.isValid();
+    } finally {
+      if (it != null) {
+        it.close();
+      }
+    }
+  }
+
+  @Override
+  public byte[] get(byte[] key) throws IOException {
+    try {
+      return db.get(key);
+    } catch (RocksDBException e) {
+      throw toIOException("Failed to get the value for the given key", e);
+    }
+  }
+
+  @Override
+  public void delete(byte[] key) throws IOException {
+    try {
+      db.delete(key);
+    } catch (RocksDBException e) {
+      throw toIOException("Failed to delete the given key", e);
+    }
+  }
+
+  @Override
+  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
+      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException {
+    return getRangeKVs(startKey, count, false, filters);
+  }
+
+  @Override
+  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
+      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException {
+    return getRangeKVs(startKey, count, true, filters);
+  }
+
+  private List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
+      int count, boolean sequential,
+      MetadataKeyFilters.MetadataKeyFilter... filters)
+      throws IOException, IllegalArgumentException {
+    List<Map.Entry<byte[], byte[]>> result = new ArrayList<>();
+    long start = System.currentTimeMillis();
+    if (count < 0) {
+      throw new IllegalArgumentException(
+          "Invalid count given " + count + ", count must not be negative");
+    }
+    RocksIterator it = null;
+    try {
+      it = db.newIterator();
+      if (startKey == null) {
+        it.seekToFirst();
+      } else {
+        if(get(startKey) == null) {
+          // Key not found, return empty list
+          return result;
+        }
+        it.seek(startKey);
+      }
+      while(it.isValid() && result.size() < count) {
+        byte[] currentKey = it.key();
+        byte[] currentValue = it.value();
+
+        it.prev();
+        final byte[] prevKey = it.isValid() ? it.key() : null;
+
+        it.seek(currentKey);
+        it.next();
+        final byte[] nextKey = it.isValid() ? it.key() : null;
+
+        if (filters == null) {
+          result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
+              currentValue));
+        } else {
+          if (Arrays.asList(filters).stream()
+              .allMatch(entry -> entry.filterKey(prevKey,
+                  currentKey, nextKey))) {
+            result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
+                currentValue));
+          } else {
+            if (result.size() > 0 && sequential) {
+              // If the caller asks for a sequential range of results
+              // and we hit a mismatch, abort the iteration here.
+              // If the result is still empty, keep looking for the first match.
+              break;
+            }
+          }
+        }
+      }
+    } finally {
+      if (it != null) {
+        it.close();
+      }
+      long end = System.currentTimeMillis();
+      long timeConsumed = end - start;
+      if (LOG.isDebugEnabled()) {
+        if (filters != null) {
+          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
+            int scanned = filter.getKeysScannedNum();
+            int hinted = filter.getKeysHintedNum();
+            if (scanned > 0 || hinted > 0) {
+              LOG.debug(
+                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
+                  filter.getClass().getSimpleName(), filter.getKeysScannedNum(),
+                  filter.getKeysHintedNum());
+            }
+          }
+        }
+        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
+            + " result length is {}.", timeConsumed, result.size());
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public void writeBatch(BatchOperation operation)
+      throws IOException {
+    List<BatchOperation.SingleOperation> operations =
+        operation.getOperations();
+    if (!operations.isEmpty()) {
+      try (WriteBatch writeBatch = new WriteBatch()) {
+        for (BatchOperation.SingleOperation opt : operations) {
+          switch (opt.getOpt()) {
+          case DELETE:
+            writeBatch.remove(opt.getKey());
+            break;
+          case PUT:
+            writeBatch.put(opt.getKey(), opt.getValue());
+            break;
+          default:
+            throw new IllegalArgumentException("Invalid operation "
+                + opt.getOpt());
+          }
+        }
+        db.write(writeOptions, writeBatch);
+      } catch (RocksDBException e) {
+        throw toIOException("Batch write operation failed", e);
+      }
+    }
+  }
+
+  @Override
+  public void compactDB() throws IOException {
+    if (db != null) {
+      try {
+        db.compactRange();
+      } catch (RocksDBException e) {
+        throw toIOException("Failed to compact db", e);
+      }
+    }
+  }
+
+  private void deleteQuietly(File fileOrDir) {
+    if (fileOrDir != null && fileOrDir.exists()) {
+      try {
+        FileUtils.forceDelete(fileOrDir);
+      } catch (IOException e) {
+        LOG.warn("Failed to delete dir {}", fileOrDir.getAbsolutePath(), e);
+      }
+    }
+  }
+
+  @Override
+  public void destroy() throws IOException {
+    // Make sure db is closed.
+    close();
+
+    // There is no destroyDB Java API available;
+    // instead we delete all of the db directories.
+    deleteQuietly(dbLocation);
+    deleteQuietly(new File(dbOptions.dbLogDir()));
+    deleteQuietly(new File(dbOptions.walDir()));
+    List<DbPath> dbPaths = dbOptions.dbPaths();
+    if (dbPaths != null) {
+      dbPaths.forEach(dbPath -> {
+        deleteQuietly(new File(dbPath.toString()));
+      });
+    }
+  }
+
+  @Override
+  public ImmutablePair<byte[], byte[]> peekAround(int offset,
+      byte[] from) throws IOException, IllegalArgumentException {
+    RocksIterator it = null;
+    try {
+      it = db.newIterator();
+      if (from == null) {
+        it.seekToFirst();
+      } else {
+        it.seek(from);
+      }
+      if (!it.isValid()) {
+        return null;
+      }
+
+      switch (offset) {
+      case 0:
+        break;
+      case 1:
+        it.next();
+        break;
+      case -1:
+        it.prev();
+        break;
+      default:
+        throw new IllegalArgumentException(
+            "Position can only be -1, 0 " + "or 1, but found " + offset);
+      }
+      return it.isValid() ? new ImmutablePair<>(it.key(), it.value()) : null;
+    } finally {
+      if (it != null) {
+        it.close();
+      }
+    }
+  }
+
+  @Override
+  public void iterate(byte[] from, EntryConsumer consumer)
+      throws IOException {
+    RocksIterator it = null;
+    try {
+      it = db.newIterator();
+      if (from != null) {
+        it.seek(from);
+      } else {
+        it.seekToFirst();
+      }
+      while (it.isValid()) {
+        if (!consumer.consume(it.key(), it.value())) {
+          break;
+        }
+        it.next();
+      }
+    } finally {
+      if (it != null) {
+        it.close();
+      }
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (statMBeanName != null) {
+      MBeans.unregister(statMBeanName);
+    }
+    if (db != null) {
+      db.close();
+    }
+
+  }
+
+  @VisibleForTesting
+  protected ObjectName getStatMBeanName() {
+    return statMBeanName;
+  }
+
+}
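
A short sketch of the iteration API implemented above (illustrative; `store` is a MetadataStore such as the one built by MetadataStoreBuilder, and EntryConsumer, declared alongside MetadataStore, is assumed to be a single-method callback so a lambda can be used).

import java.util.concurrent.atomic.AtomicInteger;

// Count every entry in the store; passing null for 'from' starts at the first key.
AtomicInteger count = new AtomicInteger();
store.iterate(null, (key, value) -> {
  count.incrementAndGet();
  return true;   // keep iterating; returning false stops the scan early
});
System.out.println("entries: " + count.get());
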
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
new file mode 100644
index 0000000..88c093e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.utils;
+
+import org.rocksdb.HistogramData;
+import org.rocksdb.HistogramType;
+import org.rocksdb.Statistics;
+import org.rocksdb.TickerType;
+
+import javax.management.Attribute;
+import javax.management.AttributeList;
+import javax.management.AttributeNotFoundException;
+import javax.management.DynamicMBean;
+import javax.management.InvalidAttributeValueException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.ReflectionException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Adapter JMX bean to publish all the Rocksdb metrics.
+ */
+public class RocksDBStoreMBean implements DynamicMBean {
+
+  private Statistics statistics;
+
+  private Set<String> histogramAttributes = new HashSet<>();
+
+  public RocksDBStoreMBean(Statistics statistics) {
+    this.statistics = statistics;
+    histogramAttributes.add("Average");
+    histogramAttributes.add("Median");
+    histogramAttributes.add("Percentile95");
+    histogramAttributes.add("Percentile99");
+    histogramAttributes.add("StandardDeviation");
+  }
+
+  @Override
+  public Object getAttribute(String attribute)
+      throws AttributeNotFoundException, MBeanException, ReflectionException {
+    for (String histogramAttribute : histogramAttributes) {
+      if (attribute.endsWith("_" + histogramAttribute.toUpperCase())) {
+        String keyName = attribute
+            .substring(0, attribute.length() - histogramAttribute.length() - 1);
+        try {
+          HistogramData histogram =
+              statistics.getHistogramData(HistogramType.valueOf(keyName));
+          try {
+            Method method =
+                HistogramData.class.getMethod("get" + histogramAttribute);
+            return method.invoke(histogram);
+          } catch (Exception e) {
+            throw new ReflectionException(e,
+                "Can't read attribute " + attribute);
+          }
+        } catch (IllegalArgumentException exception) {
+          throw new AttributeNotFoundException(
+              "No such attribute in RocksDB stats: " + attribute);
+        }
+      }
+    }
+    try {
+      return statistics.getTickerCount(TickerType.valueOf(attribute));
+    } catch (IllegalArgumentException ex) {
+      throw new AttributeNotFoundException(
+          "No such attribute in RocksDB stats: " + attribute);
+    }
+  }
+
+  @Override
+  public void setAttribute(Attribute attribute)
+      throws AttributeNotFoundException, InvalidAttributeValueException,
+      MBeanException, ReflectionException {
+
+  }
+
+  @Override
+  public AttributeList getAttributes(String[] attributes) {
+    AttributeList result = new AttributeList();
+    for (String attributeName : attributes) {
+      try {
+        Object value = getAttribute(attributeName);
+        result.add(value);
+      } catch (Exception e) {
+        //TODO: attributes that cannot be read are silently skipped for now.
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public AttributeList setAttributes(AttributeList attributes) {
+    return null;
+  }
+
+  @Override
+  public Object invoke(String actionName, Object[] params, String[] signature)
+      throws MBeanException, ReflectionException {
+    return null;
+  }
+
+  @Override
+  public MBeanInfo getMBeanInfo() {
+
+    List<MBeanAttributeInfo> attributes = new ArrayList<>();
+    for (TickerType tickerType : TickerType.values()) {
+      attributes.add(new MBeanAttributeInfo(tickerType.name(), "long",
+          "RocksDBStat: " + tickerType.name(), true, false, false));
+    }
+    for (HistogramType histogramType : HistogramType.values()) {
+      for (String histogramAttribute : histogramAttributes) {
+        attributes.add(new MBeanAttributeInfo(
+            histogramType.name() + "_" + histogramAttribute.toUpperCase(),
+            "long", "RocksDBStat: " + histogramType.name(), true, false,
+            false));
+      }
+    }
+
+    return new MBeanInfo("", "RocksDBStat",
+        attributes.toArray(new MBeanAttributeInfo[0]), null, null, null);
+
+  }
+}
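
A sketch of how the attribute names exposed by this bean are composed (illustrative; exception handling omitted). Ticker attributes use the raw TickerType name, while histogram attributes append one of Average, Median, Percentile95, Percentile99 or StandardDeviation in upper case.

import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;

RocksDB.loadLibrary();
Statistics stats = new Statistics();
stats.setStatsLevel(StatsLevel.ALL);
RocksDBStoreMBean mbean = new RocksDBStoreMBean(stats);

// Ticker attribute: the TickerType name as-is.
Object bytesWritten = mbean.getAttribute("BYTES_WRITTEN");
// Histogram attribute: HistogramType name + "_" + upper-cased suffix.
Object dbGetMedian = mbean.getAttribute("DB_GET_MEDIAN");
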
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java
new file mode 100644
index 0000000..4466337
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.utils;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
new file mode 100644
index 0000000..3a55831
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.ratis;
+
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.ratis.client.RaftClient;
+import org.apache.ratis.conf.RaftProperties;
+import org.apache.ratis.grpc.GrpcConfigKeys;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.rpc.RpcType;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.apache.ratis.util.SizeInBytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Ratis helper methods.
+ */
+public interface RatisHelper {
+  Logger LOG = LoggerFactory.getLogger(RatisHelper.class);
+
+  static String toRaftPeerIdString(DatanodeDetails id) {
+    return id.getUuidString() + "_" + id.getRatisPort();
+  }
+
+  static String toRaftPeerAddressString(DatanodeDetails id) {
+    return id.getIpAddress() + ":" + id.getRatisPort();
+  }
+
+  static RaftPeerId toRaftPeerId(DatanodeDetails id) {
+    return RaftPeerId.valueOf(toRaftPeerIdString(id));
+  }
+
+  static RaftPeer toRaftPeer(DatanodeDetails id) {
+    return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id));
+  }
+
+  static List<RaftPeer> toRaftPeers(Pipeline pipeline) {
+    return toRaftPeers(pipeline.getMachines());
+  }
+
+  static <E extends DatanodeDetails> List<RaftPeer> toRaftPeers(
+      List<E> datanodes) {
+    return datanodes.stream().map(RatisHelper::toRaftPeer)
+        .collect(Collectors.toList());
+  }
+
+  /* TODO: use a dummy id for all groups for the moment.
+   *       It should be changed to a unique id for each group.
+   */
+  RaftGroupId DUMMY_GROUP_ID =
+      RaftGroupId.valueOf(ByteString.copyFromUtf8("AOzoneRatisGroup"));
+
+  RaftGroup EMPTY_GROUP = new RaftGroup(DUMMY_GROUP_ID,
+      Collections.emptyList());
+
+  static RaftGroup emptyRaftGroup() {
+    return EMPTY_GROUP;
+  }
+
+  static RaftGroup newRaftGroup(List<DatanodeDetails> datanodes) {
+    final List<RaftPeer> newPeers = datanodes.stream()
+        .map(RatisHelper::toRaftPeer)
+        .collect(Collectors.toList());
+    return RatisHelper.newRaftGroup(newPeers);
+  }
+
+  static RaftGroup newRaftGroup(Collection<RaftPeer> peers) {
+    return peers.isEmpty() ? emptyRaftGroup()
+        : new RaftGroup(DUMMY_GROUP_ID, peers);
+  }
+
+  static RaftGroup newRaftGroup(Pipeline pipeline) {
+    return newRaftGroup(toRaftPeers(pipeline));
+  }
+
+  static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline) {
+    return newRaftClient(rpcType, toRaftPeerId(pipeline.getLeader()),
+        newRaftGroup(pipeline));
+  }
+
+  static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader) {
+    return newRaftClient(rpcType, leader.getId(),
+        newRaftGroup(new ArrayList<>(Arrays.asList(leader))));
+  }
+
+  static RaftClient newRaftClient(
+      RpcType rpcType, RaftPeerId leader, RaftGroup group) {
+    LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, leader, group);
+    final RaftProperties properties = new RaftProperties();
+    RaftConfigKeys.Rpc.setType(properties, rpcType);
+    GrpcConfigKeys.setMessageSizeMax(properties,
+        SizeInBytes.valueOf(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE));
+
+    return RaftClient.newBuilder()
+        .setRaftGroup(group)
+        .setLeaderId(leader)
+        .setProperties(properties)
+        .build();
+  }
+}
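
A usage sketch of the helpers above (illustrative): given a DatanodeDetails for a Ratis-enabled datanode, build a RaftPeer and a RaftClient. SupportedRpcType.GRPC is one of the RpcType implementations shipped with Ratis; the `datanode` variable is assumed to come from elsewhere (e.g. a pipeline).

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.ratis.RatisHelper;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.rpc.SupportedRpcType;

// 'datanode' is a DatanodeDetails obtained elsewhere.
RaftPeer peer = RatisHelper.toRaftPeer(datanode);
try (RaftClient client = RatisHelper.newRaftClient(SupportedRpcType.GRPC, peer)) {
  // send Raft requests through 'client' ...
}
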
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java
new file mode 100644
index 0000000..c13c20c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ratis;
+
+/**
+ * This package contains classes related to Apache Ratis.
+ */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
new file mode 100644
index 0000000..29242ad
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.ratis.shaded.com.google.protobuf;
+
+/** Utilities for the shaded protobuf in Ratis. */
+public interface ShadedProtoUtil {
+  /**
+   * @param bytes the raw bytes to wrap
+   * @return the wrapped shaded {@link ByteString} (no copying).
+   */
+  static ByteString asShadedByteString(byte[] bytes) {
+    return ByteString.wrap(bytes);
+  }
+
+  /**
+   * @param shaded the shaded {@link ByteString} to convert
+   * @return a {@link com.google.protobuf.ByteString} (requires copying).
+   */
+  static com.google.protobuf.ByteString asByteString(ByteString shaded) {
+    return com.google.protobuf.ByteString.copyFrom(
+        shaded.asReadOnlyByteBuffer());
+  }
+}
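
A round-trip sketch of the two conversions above (illustrative): wrapping raw bytes as a shaded ByteString costs no copy, while converting back to the unshaded type copies the data.

import java.nio.charset.StandardCharsets;
import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil;

byte[] raw = "chunk-data".getBytes(StandardCharsets.UTF_8);
ByteString shaded = ShadedProtoUtil.asShadedByteString(raw);        // wraps, no copy
com.google.protobuf.ByteString unshaded =
    ShadedProtoUtil.asByteString(shaded);                           // copies
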
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
new file mode 100644
index 0000000..032dd96
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ratis.shaded.com.google.protobuf;
+
+/**
+ * This package contains classes related to the shaded protobuf in Apache Ratis.
+ */
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
new file mode 100644
index 0000000..a6270ef
--- /dev/null
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -0,0 +1,415 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and Unstable.
+ * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html
+ * for what changes are allowed for an *Unstable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used to transfer data
+// to and from the datanode.
+option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_outer_classname = "ContainerProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdds;
+import "hdfs.proto";
+import "hdds.proto";
+
+/**
+ * Commands that are used to manipulate the state of containers on a datanode.
+ *
+ * These commands allow us to work against the datanode - from
+ * StorageContainer Manager as well as clients.
+ *
+ *  1. CreateContainer - This call is usually made by Storage Container
+ *     manager, when we need to create a new container on a given datanode.
+ *
+ *  2. ReadContainer - Allows end user to stat a container. For example
+ *     this allows us to return the metadata of a container.
+ *
+ *  3. UpdateContainer - Updates container metadata.
+ *
+ *  4. DeleteContainer - This call is made to delete a container.
+ *
+ *  5. ListContainer - Returns the list of containers on this
+ *     datanode. This will be used by tests and tools.
+ *
+ *  6. PutKey - Given a valid container, creates a key.
+ *
+ *  7. GetKey - Allows user to read the metadata of a Key.
+ *
+ *  8. DeleteKey - Deletes a given key.
+ *
+ *  9. ListKey - Returns a list of keys that are present inside
+ *      a given container.
+ *
+ *  10. ReadChunk - Allows us to read a chunk.
+ *
+ *  11. DeleteChunk - Delete an unused chunk.
+ *
+ *  12. WriteChunk - Allows us to write a chunk
+ *
+ *  13. ListChunk - Given a Container/Key returns the list of Chunks.
+ *
+ *  14. CompactChunk - Re-writes a chunk based on Offsets.
+ *
+ *  15. PutSmallFile - A single RPC that combines both putKey and WriteChunk.
+ *
+ *  16. GetSmallFile - A single RPC that combines both getKey and ReadChunk.
+ *
+ *  17. CloseContainer - Closes an open container and makes it immutable.
+ *
+ *  18. CopyContainer - Copies a container from a remote machine.
+ */
+
+enum Type {
+  CreateContainer = 1;
+  ReadContainer = 2;
+  UpdateContainer = 3;
+  DeleteContainer = 4;
+  ListContainer = 5;
+
+  PutKey = 6;
+  GetKey = 7;
+  DeleteKey = 8;
+  ListKey = 9;
+
+  ReadChunk = 10;
+  DeleteChunk = 11;
+  WriteChunk = 12;
+  ListChunk = 13;
+  CompactChunk = 14;
+
+  /** Combines Key and Chunk Operation into Single RPC. */
+  PutSmallFile = 15;
+  GetSmallFile = 16;
+  CloseContainer = 17;
+
+}
+
+
+enum Result {
+  SUCCESS = 1;
+  UNSUPPORTED_REQUEST = 2;
+  MALFORMED_REQUEST = 3;
+  CONTAINER_INTERNAL_ERROR = 4;
+  INVALID_CONFIG = 5;
+  INVALID_FILE_HASH_FOUND = 6;
+  CONTAINER_EXISTS = 7;
+  NO_SUCH_ALGORITHM = 8;
+  CONTAINER_NOT_FOUND = 9;
+  IO_EXCEPTION = 10;
+  UNABLE_TO_READ_METADATA_DB = 11;
+  NO_SUCH_KEY = 12;
+  OVERWRITE_FLAG_REQUIRED = 13;
+  UNABLE_TO_FIND_DATA_DIR = 14;
+  INVALID_WRITE_SIZE = 15;
+  CHECKSUM_MISMATCH = 16;
+  UNABLE_TO_FIND_CHUNK = 17;
+  PROTOC_DECODING_ERROR = 18;
+  INVALID_ARGUMENT = 19;
+  PUT_SMALL_FILE_ERROR = 20;
+  GET_SMALL_FILE_ERROR = 21;
+  CLOSED_CONTAINER_IO = 22;
+  ERROR_CONTAINER_NOT_EMPTY = 23;
+  ERROR_IN_COMPACT_DB = 24;
+  UNCLOSED_CONTAINER_IO = 25;
+  DELETE_ON_OPEN_CONTAINER = 26;
+  CLOSED_CONTAINER_RETRY = 27;
+}
+
+message ContainerCommandRequestProto {
+  required Type cmdType = 1; // Type of the command
+
+  // A string that identifies this command; we generate a Trace ID in the Ozone
+  // frontend, which allows us to trace the command all over Ozone.
+  optional string traceID = 2;
+
+  // One of the following command is available when the corresponding
+  // cmdType is set. At the protocol level we allow only
+  // one command in each packet.
+  // TODO : Upgrade to Protobuf 2.6 or later.
+  optional   CreateContainerRequestProto createContainer = 3;
+  optional   ReadContainerRequestProto readContainer = 4;
+  optional   UpdateContainerRequestProto updateContainer = 5;
+  optional   DeleteContainerRequestProto deleteContainer = 6;
+  optional   ListContainerRequestProto listContainer = 7;
+
+  optional   PutKeyRequestProto putKey = 8;
+  optional   GetKeyRequestProto getKey = 9;
+  optional   DeleteKeyRequestProto deleteKey = 10;
+  optional   ListKeyRequestProto listKey = 11;
+
+  optional   ReadChunkRequestProto readChunk = 12;
+  optional   WriteChunkRequestProto writeChunk = 13;
+  optional   DeleteChunkRequestProto deleteChunk = 14;
+  optional   ListChunkRequestProto listChunk = 15;
+
+  optional   PutSmallFileRequestProto putSmallFile = 16;
+  optional   GetSmallFileRequestProto getSmallFile = 17;
+  optional   CloseContainerRequestProto closeContainer = 18;
+  required   string datanodeUuid = 19;
+}
+
+message ContainerCommandResponseProto {
+  required Type cmdType = 1;
+  optional string traceID = 2;
+
+  optional   CreateContainerResponseProto createContainer = 3;
+  optional   ReadContainerResponseProto readContainer = 4;
+  optional   UpdateContainerResponseProto updateContainer = 5;
+  optional   DeleteContainerResponseProto deleteContainer = 6;
+  optional   ListContainerResponseProto listContainer = 7;
+
+  optional   PutKeyResponseProto putKey = 8;
+  optional   GetKeyResponseProto getKey = 9;
+  optional   DeleteKeyResponseProto deleteKey = 10;
+  optional   ListKeyResponseProto listKey = 11;
+
+  optional  WriteChunkResponseProto writeChunk = 12;
+  optional  ReadChunkResponseProto readChunk = 13;
+  optional  DeleteChunkResponseProto deleteChunk = 14;
+  optional  ListChunkResponseProto listChunk = 15;
+
+  required Result result = 17;
+  optional string message = 18;
+
+  optional PutSmallFileResponseProto putSmallFile = 19;
+  optional GetSmallFileResponseProto getSmallFile = 20;
+  optional CloseContainerResponseProto closeContainer = 21;
+
+}
+
+message ContainerData {
+  required string name = 1;
+  repeated KeyValue metadata = 2;
+  optional string dbPath = 3;
+  optional string containerPath = 4;
+  optional string hash = 6;
+  optional int64 bytesUsed = 7;
+  optional int64 size = 8;
+  optional int64 keyCount = 9;
+  //TODO: change to required after we switch container ID from string to long
+  optional int64 containerID = 10;
+  optional LifeCycleState state = 11 [default = OPEN];
+}
+
+message ContainerMeta {
+  required string fileName = 1;
+  required string hash = 2;
+}
+
+// Container Messages.
+message  CreateContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required ContainerData containerData = 2;
+}
+
+message  CreateContainerResponseProto {
+}
+
+message  ReadContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required string name = 2;
+}
+
+message  ReadContainerResponseProto {
+  optional ContainerData containerData = 2;
+}
+
+message  UpdateContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required ContainerData containerData = 2;
+  optional bool forceUpdate = 3 [default = false];
+}
+
+message  UpdateContainerResponseProto {
+}
+
+message  DeleteContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required string name = 2;
+  optional bool forceDelete = 3 [default = false];
+}
+
+message  DeleteContainerResponseProto {
+}
+
+message  ListContainerRequestProto {
+  required Pipeline pipeline = 1;
+  optional string prefix = 2;
+  required uint32 count = 3; // Max Results to return
+  optional string prevKey = 4;  // if this is not set query from start.
+}
+
+message  ListContainerResponseProto {
+  repeated ContainerData containerData = 1;
+}
+
+message CloseContainerRequestProto {
+  required Pipeline pipeline = 1;
+}
+
+message CloseContainerResponseProto {
+  optional Pipeline pipeline = 1;
+  optional string hash = 2;
+}
+
+message KeyData {
+  required string containerName = 1;
+  required string name = 2;
+  optional int64 flags = 3; // for future use.
+  repeated KeyValue metadata = 4;
+  repeated ChunkInfo chunks = 5;
+}
+
+// Key Messages.
+message  PutKeyRequestProto {
+  required Pipeline pipeline = 1;
+  required KeyData keyData = 2;
+}
+
+message  PutKeyResponseProto {
+}
+
+message  GetKeyRequestProto  {
+  required Pipeline pipeline = 1;
+  required KeyData keyData = 2;
+}
+
+message  GetKeyResponseProto  {
+  required KeyData keyData = 1;
+}
+
+
+message  DeleteKeyRequestProto {
+  required Pipeline pipeline = 1;
+  required string name = 2;
+}
+
+message   DeleteKeyResponseProto {
+}
+
+message  ListKeyRequestProto {
+  required Pipeline pipeline = 1;
+  optional string prefix = 2; // if specified returns keys that match prefix.
+  required string prevKey = 3;
+  required uint32 count = 4;
+
+}
+
+message  ListKeyResponseProto {
+  repeated KeyData keyData = 1;
+}
+
+// Chunk Operations
+
+message ChunkInfo {
+  required string chunkName = 1;
+  required uint64 offset = 2;
+  required uint64 len = 3;
+  optional string checksum = 4;
+  repeated KeyValue metadata = 5;
+}
+
+enum Stage {
+    WRITE_DATA = 1;
+    COMMIT_DATA = 2;
+    COMBINED = 3;
+}
+
+message  WriteChunkRequestProto  {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required ChunkInfo chunkData = 3;
+  optional bytes data = 4;
+  optional Stage stage = 5 [default = COMBINED];
+}
+
+message  WriteChunkResponseProto {
+}
+
+message  ReadChunkRequestProto  {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required ChunkInfo chunkData = 3;
+}
+
+message  ReadChunkResponseProto {
+  required Pipeline pipeline = 1;
+  required ChunkInfo chunkData = 2;
+  required bytes data = 3;
+}
+
+message  DeleteChunkRequestProto {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required ChunkInfo chunkData = 3;
+}
+
+message  DeleteChunkResponseProto {
+}
+
+message  ListChunkRequestProto {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required string prevChunkName = 3;
+  required uint32 count = 4;
+}
+
+message  ListChunkResponseProto {
+  repeated ChunkInfo chunkData = 1;
+}
+
+/** For small file access, this combines WriteChunk and PutKey into a single
+RPC. */
+
+message PutSmallFileRequestProto {
+  required PutKeyRequestProto key = 1;
+  required ChunkInfo chunkInfo = 2;
+  required bytes data = 3;
+}
+
+
+message PutSmallFileResponseProto {
+
+}
+
+message GetSmallFileRequestProto {
+  required GetKeyRequestProto key = 1;
+}
+
+message GetSmallFileResponseProto {
+  required ReadChunkResponseProto data = 1;
+}
+
+message CopyContainerRequestProto {
+  required string containerName = 1;
+  required uint64 readOffset = 2;
+  optional uint64 len = 3;
+}
+
+message CopyContainerResponseProto {
+  required string archiveName = 1;
+  required uint64 readOffset = 2;
+  required uint64 len = 3;
+  required bool eof = 4;
+  repeated bytes data = 5;
+  optional int64 checksum = 6;
+}
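
A hedged sketch of the request envelope defined above: one command per packet, selected by cmdType, with the matching optional field populated. It assumes the standard protobuf-generated builder API for the ContainerProtos and HddsProtos outer classes; all names and values below are placeholders.

import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

HddsProtos.PipelineChannel channel = HddsProtos.PipelineChannel.newBuilder()
    .setLeaderID("dn-1")
    .build();
HddsProtos.Pipeline pipeline = HddsProtos.Pipeline.newBuilder()
    .setContainerName("container-1")
    .setPipelineChannel(channel)
    .build();
ContainerProtos.ContainerCommandRequestProto request =
    ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.CreateContainer)
        .setDatanodeUuid("datanode-uuid")
        .setTraceID("trace-1")
        .setCreateContainer(
            ContainerProtos.CreateContainerRequestProto.newBuilder()
                .setPipeline(pipeline)
                .setContainerData(ContainerProtos.ContainerData.newBuilder()
                    .setName("container-1")))
        .build();
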
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
new file mode 100644
index 0000000..38d2e16
--- /dev/null
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and unstable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for an *unstable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_outer_classname = "ScmBlockLocationProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdds;
+
+import "hdfs.proto";
+import "hdds.proto";
+
+
+// SCM Block protocol
+/**
+ * keys - batch of block keys to find
+ */
+message GetScmBlockLocationsRequestProto {
+  repeated string keys = 1;
+}
+
+/**
+ * locatedBlocks - for each requested hash, nodes that currently host the
+ *     container for that object key hash
+ */
+message GetScmBlockLocationsResponseProto {
+  repeated ScmLocatedBlockProto locatedBlocks = 1;
+}
+
+/**
+ * Holds the nodes that currently host the blocks for a key.
+ */
+message ScmLocatedBlockProto {
+  required string key = 1;
+  required hadoop.hdds.Pipeline pipeline = 2;
+}
+
+/**
+* Request sent to SCM asking it to allocate a block of the specified size.
+*/
+message AllocateScmBlockRequestProto {
+  required uint64 size = 1;
+  required ReplicationType type = 2;
+  required hadoop.hdds.ReplicationFactor factor = 3;
+  required string owner = 4;
+
+}
+
+/**
+ * A delete key request sent by KSM to SCM; it contains
+ * multiple keys (and their blocks).
+ */
+message DeleteScmKeyBlocksRequestProto {
+  repeated KeyBlocks keyBlocks = 1;
+}
+
+/**
+ * An object key and all its associated blocks.
+ * We need to encapsulate the object key name plus the blocks in this protocol
+ * because SCM needs to respond to KSM with the keys it has deleted.
+ * If the response only contained blocks, it would be very expensive for
+ * KSM to figure out which keys have been deleted.
+ */
+message KeyBlocks {
+  required string key = 1;
+  repeated string blocks = 2;
+}
+
+/**
+ * A delete key response from SCM to KSM; it contains multiple child results.
+ * Each child result represents the deletion result for one key; only if all
+ * blocks of a key are successfully deleted is the key result considered successful.
+ */
+message DeleteScmKeyBlocksResponseProto {
+  repeated DeleteKeyBlocksResultProto results = 1;
+}
+
+/**
+ * A key deletion result. It contains all the block deletion results.
+ */
+message DeleteKeyBlocksResultProto {
+  required string objectKey = 1;
+  repeated DeleteScmBlockResult blockResults = 2;
+}
+
+message DeleteScmBlockResult {
+  enum Result {
+    success = 1;
+    chillMode = 2;
+    errorNotFound = 3;
+    unknownFailure = 4;
+  }
+  required Result result = 1;
+  required string key = 2;
+}
+
+/**
+ * Reply from SCM with the result of a block allocation request.
+ */
+message AllocateScmBlockResponseProto {
+  enum Error {
+    success = 1;
+    errorNotEnoughSpace = 2;
+    errorSizeTooBig = 3;
+    unknownFailure = 4;
+  }
+  required Error errorCode = 1;
+  required string key = 2;
+  required hadoop.hdds.Pipeline pipeline = 3;
+  required bool createContainer = 4;
+  optional string errorMessage = 5;
+}
+
+/**
+ * Protocol used from KeySpaceManager to StorageContainerManager.
+ * See request and response messages for details of the RPC calls.
+ */
+service ScmBlockLocationProtocolService {
+
+  /**
+   * Find the set of nodes that currently host the block, as
+   * identified by the key.  This method supports batch lookup by
+   * passing multiple keys.
+   */
+  rpc getScmBlockLocations(GetScmBlockLocationsRequestProto)
+      returns (GetScmBlockLocationsResponseProto);
+
+  /**
+   * Creates a block entry in SCM.
+   */
+  rpc allocateScmBlock(AllocateScmBlockRequestProto)
+      returns (AllocateScmBlockResponseProto);
+
+  /**
+   * Deletes blocks for a set of object keys from SCM.
+   */
+  rpc deleteScmKeyBlocks(DeleteScmKeyBlocksRequestProto)
+      returns (DeleteScmKeyBlocksResponseProto);
+
+  /**
+   * Gets the scmInfo from SCM.
+   */
+  rpc getScmInfo(hadoop.hdds.GetScmInfoRequestProto)
+      returns (hadoop.hdds.GetScmInfoRespsonseProto);
+}
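
A hedged sketch of the KSM-to-SCM delete request described above, again assuming the standard protobuf-generated builders; the key and block names are placeholders.

import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;

ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto request =
    ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto.newBuilder()
        .addKeyBlocks(ScmBlockLocationProtocolProtos.KeyBlocks.newBuilder()
            .setKey("volume/bucket/key1")
            .addBlocks("block-1")
            .addBlocks("block-2"))
        .build();
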
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
new file mode 100644
index 0000000..d7540a3
--- /dev/null
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -0,0 +1,214 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and unstable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for an *unstable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_outer_classname = "StorageContainerLocationProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdds;
+
+import "hdfs.proto";
+import "hdds.proto";
+
+/**
+* Request sent to SCM asking where the container should be created.
+*/
+message ContainerRequestProto {
+  required string containerName = 1;
+  // Ozone only supports a replication factor of either 1 or 3.
+  required ReplicationFactor replicationFactor = 2;
+  required ReplicationType  replicationType = 3;
+  required string owner = 4;
+
+}
+
+/**
+ * Reply from SCM with the result of a container allocation request.
+ */
+message ContainerResponseProto {
+  enum Error {
+    success = 1;
+    errorContainerAlreadyExists = 2;
+    errorContainerMissing = 3;
+  }
+  required Error errorCode = 1;
+  required Pipeline pipeline = 2;
+  optional string errorMessage = 3;
+}
+
+message GetContainerRequestProto {
+  required string containerName = 1;
+}
+
+message GetContainerResponseProto {
+  required Pipeline pipeline = 1;
+}
+
+message SCMListContainerRequestProto {
+  required uint32 count = 1;
+  optional string startName = 2;
+  optional string prefixName = 3;
+}
+
+message SCMListContainerResponseProto {
+  repeated SCMContainerInfo containers = 1;
+}
+
+message SCMDeleteContainerRequestProto {
+  required string containerName = 1;
+}
+
+message SCMDeleteContainerResponseProto {
+  // Empty response
+}
+
+message ObjectStageChangeRequestProto {
+  enum Type {
+    container = 1;
+    pipeline = 2;
+  }
+  // delete/copy operation may be added later
+  enum Op {
+    create = 1;
+    close = 2;
+  }
+  enum Stage {
+    begin = 1;
+    complete = 2;
+  }
+  required string name = 1;
+  required Type type = 2;
+  required Op op = 3;
+  required Stage stage = 4;
+}
+
+message ObjectStageChangeResponseProto {
+  // Empty response
+}
+
+/*
+ NodeQueryRequest asks SCM to return the list of nodes that
+ match the NodeState values that we are requesting.
+*/
+message NodeQueryRequestProto {
+
+
+  // Repeated, so we can specify more than one status type.
+  // These NodeState types are additive for now, in the sense that
+  // if you specify HEALTHY and FREE_NODE members,
+  // then you get all healthy nodes which are not raft members.
+  //
+  // If you specify both healthy and dead nodes, you will get nothing
+  // back. The server does not dictate what combinations make sense;
+  // it is entirely up to the caller.
+  // TODO: Support operators like OR and NOT. Currently it is always an
+  // implied AND.
+
+  repeated NodeState query = 1;
+  required QueryScope scope = 2;
+  optional string poolName = 3; // if scope is pool, then pool name is needed.
+}
+
+message NodeQueryResponseProto {
+  required NodePool datanodes = 1;
+}
+
+/**
+  Request to create a replication pipeline.
+ */
+message PipelineRequestProto {
+  required ReplicationType replicationType = 1;
+  required ReplicationFactor replicationFactor = 2;
+
+  // if datanodes are specified then pipelines are created using those
+  // datanodes.
+  optional NodePool nodePool = 3;
+  optional string pipelineID = 4;
+}
+
+message  PipelineResponseProto {
+  enum Error {
+    success = 1;
+    errorPipelineAlreadyExists = 2;
+  }
+  required Error errorCode = 1;
+  optional Pipeline  pipeline = 2;
+  optional string errorMessage = 3;
+}
+
+/**
+ * Protocol used from an HDFS node to StorageContainerManager.  See the request
+ * and response messages for details of the RPC calls.
+ */
+service StorageContainerLocationProtocolService {
+
+  /**
+   * Creates a container entry in SCM.
+   */
+  rpc allocateContainer(ContainerRequestProto) returns (ContainerResponseProto);
+
+  /**
+   * Returns the pipeline for a given container.
+   */
+  rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
+
+  rpc listContainer(SCMListContainerRequestProto) returns (SCMListContainerResponseProto);
+
+  /**
+   * Deletes a container in SCM.
+   */
+  rpc deleteContainer(SCMDeleteContainerRequestProto) returns (SCMDeleteContainerResponseProto);
+
+  /**
+  * Returns a set of Nodes that meet the given criteria.
+  */
+  rpc queryNode(NodeQueryRequestProto)  returns (NodeQueryResponseProto);
+
+  /**
+  * Notification from the client when container or pipeline operations begin or finish on datanodes.
+  */
+  rpc notifyObjectStageChange(ObjectStageChangeRequestProto) returns (ObjectStageChangeResponseProto);
+
+  /*
+  *  APIs that manage pipelines.
+  *
+  * Pipelines are abstractions offered by SCM and the datanodes that allow
+  * users to create a replication pipeline.
+  *
+  *  The following APIs allow command line programs like the SCM CLI to list
+  * and manage pipelines.
+  */
+
+  /**
+  *  Creates a replication pipeline.
+  */
+  rpc allocatePipeline(PipelineRequestProto)
+      returns (PipelineResponseProto);
+
+  /**
+  *  Returns information about SCM.
+  */
+  rpc getScmInfo(GetScmInfoRequestProto)
+      returns (GetScmInfoRespsonseProto);
+}
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
new file mode 100644
index 0000000..0b650b4
--- /dev/null
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and unstable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for an *unstable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.hdds.protocol.proto";
+option java_outer_classname = "HddsProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdds;
+
+message DatanodeDetailsProto {
+    // TODO: make the port a separate proto message and use it here
+    required string uuid = 1;  // UUID assigned to the Datanode.
+    required string ipAddress = 2;     // IP address
+    required string hostName = 3;      // hostname
+    optional uint32 containerPort = 4 [default = 0];  // Ozone stand_alone protocol
+    optional uint32 ratisPort = 5 [default = 0];      //Ozone ratis port
+    optional uint32 ozoneRestPort = 6 [default = 0];
+}
+
+message PipelineChannel {
+    required string leaderID = 1;
+    repeated DatanodeDetailsProto members = 2;
+    optional LifeCycleState state = 3 [default = OPEN];
+    optional ReplicationType type = 4 [default = STAND_ALONE];
+    optional ReplicationFactor factor = 5 [default = ONE];
+    optional string name = 6;
+}
+
+// A pipeline is composed of PipelineChannel (Ratis/StandAlone) that back a
+// container.
+message Pipeline {
+    required string containerName = 1;
+    required PipelineChannel pipelineChannel = 2;
+}
+
+message KeyValue {
+    required string key = 1;
+    optional string value = 2;
+}
+
+/**
+ * Type of the node.
+ */
+enum NodeType {
+    KSM = 1;
+    SCM = 2;
+    DATANODE = 3;
+}
+
+// Should we rename NodeState to DatanodeState?
+/**
+ * Enum that represents the Node State. This is used in calls to getNodeList
+ * and getNodeCount.
+ */
+enum NodeState {
+    HEALTHY             = 1;
+    STALE               = 2;
+    DEAD                = 3;
+    DECOMMISSIONING     = 4;
+    DECOMMISSIONED      = 5;
+    RAFT_MEMBER         = 6;
+    FREE_NODE           = 7; // Not a member in raft.
+    INVALID             = 8;
+}
+
+enum QueryScope {
+    CLUSTER = 1;
+    POOL = 2;
+}
+
+message Node {
+    required DatanodeDetailsProto nodeID = 1;
+    repeated NodeState nodeStates = 2;
+}
+
+message NodePool {
+    repeated Node nodes = 1;
+}
+
+/**
+ * LifeCycleState for SCM object creation state machine:
+ *    ->Allocated: allocated on SCM but the client has not started creating it yet.
+ *    ->Creating: allocated and assigned to a client to create, but not ack-ed yet.
+ *    ->Open: allocated on SCM, created on datanodes and ack-ed by a client.
+ *    ->Close: container closed because all of its space is used, or due to an error.
+ *    ->Timeout -> container failed to be created on datanodes or ack-ed by client.
+ *    ->Deleting(TBD) -> container will be deleted after the timeout.
+ * 1. ALLOCATE-ed containers on SCM can't serve key/block related operations
+ *    until ACK-ed explicitly, which changes the state to OPEN.
+ * 2. Only OPEN/CLOSED containers can serve key/block related operations.
+ * 3. ALLOCATE-ed containers that are not ACK-ed in time will be TIMEOUT-ed and
+ *    CLEANUP-ed asynchronously.
+ */
+
+enum LifeCycleState {
+    ALLOCATED = 1;
+    CREATING = 2; // Used for container allocated/created by different client.
+    OPEN = 3; // Mostly an update to SCM via HB or client call.
+    CLOSING = 4;
+    CLOSED = 5; // !! States after this one have not been used yet.
+    DELETING = 6;
+    DELETED = 7; // object is deleted.
+}
+
+enum LifeCycleEvent {
+    CREATE = 1; // A request to the client to create this object.
+    CREATED = 2;
+    FINALIZE = 3;
+    CLOSE = 4; // !! Events after this one have not been used yet.
+    UPDATE = 5;
+    TIMEOUT = 6; // Creation has timed out from SCM's view.
+    DELETE = 7;
+    CLEANUP = 8;
+}
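
To make the lifecycle described in the comment above concrete, here is an illustrative Java sketch of a transition lookup. It mirrors only the happy path and timeout path spelled out in that comment; the authoritative transition table lives in SCM's state-machine code, and the enums below are local stand-ins for the generated ones:

```java
import java.util.EnumMap;
import java.util.Map;

public class ContainerLifecycleSketch {

  // Local stand-ins for the proto enums, for illustration only.
  enum LifeCycleState { ALLOCATED, CREATING, OPEN, CLOSING, CLOSED, DELETING, DELETED }
  enum LifeCycleEvent { CREATE, CREATED, FINALIZE, CLOSE, UPDATE, TIMEOUT, DELETE, CLEANUP }

  private static final Map<LifeCycleState, Map<LifeCycleEvent, LifeCycleState>> TRANSITIONS =
      new EnumMap<>(LifeCycleState.class);

  static {
    // Transitions taken from the comment above.
    put(LifeCycleState.ALLOCATED, LifeCycleEvent.CREATE,  LifeCycleState.CREATING);
    put(LifeCycleState.CREATING,  LifeCycleEvent.CREATED, LifeCycleState.OPEN);
    // Assumed mapping of the timeout/cleanup path; the target states are an
    // illustration, not a statement about the actual SCM implementation.
    put(LifeCycleState.CREATING,  LifeCycleEvent.TIMEOUT, LifeCycleState.DELETING);
    put(LifeCycleState.DELETING,  LifeCycleEvent.CLEANUP, LifeCycleState.DELETED);
  }

  private static void put(LifeCycleState from, LifeCycleEvent on, LifeCycleState to) {
    TRANSITIONS.computeIfAbsent(from, s -> new EnumMap<>(LifeCycleEvent.class)).put(on, to);
  }

  /** Returns the next state, or throws if the event is not legal in the current state. */
  static LifeCycleState next(LifeCycleState current, LifeCycleEvent event) {
    Map<LifeCycleEvent, LifeCycleState> byEvent = TRANSITIONS.get(current);
    if (byEvent == null || !byEvent.containsKey(event)) {
      throw new IllegalStateException("No transition from " + current + " on " + event);
    }
    return byEvent.get(event);
  }
}
```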
+
+message SCMContainerInfo {
+    // TODO : Remove the container name from pipeline.
+    required string containerName = 1;
+    required LifeCycleState state = 2;
+    required Pipeline pipeline = 3;
+    // This is not the total size of the container, but the space allocated by
+    // SCM for clients to write blocks.
+    required uint64 allocatedBytes = 4;
+    required uint64 usedBytes = 5;
+    required uint64 numberOfKeys = 6;
+    optional int64 stateEnterTime = 7;
+    required string owner = 8;
+    required int64 containerID = 9;
+}
+
+message GetScmInfoRequestProto {
+}
+
+message GetScmInfoRespsonseProto {
+    required string clusterId = 1;
+    required string scmId = 2;
+}
+
+
+enum ReplicationType {
+    RATIS = 1;
+    STAND_ALONE = 2;
+    CHAINED = 3;
+}
+
+enum ReplicationFactor {
+    ONE = 1;
+    THREE = 3;
+}
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
new file mode 100644
index 0000000..cb0ab18
--- /dev/null
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -0,0 +1,1049 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Do not modify this file directly.  Instead, copy entries that you -->
+<!-- wish to modify from this file into ozone-site.xml and change them -->
+<!-- there.  If ozone-site.xml does not already exist, create it.      -->
+
+<!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
+<!--DEBUG, CLIENT, SERVER, KSM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
+<!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
+
+<configuration>
+
+  <!--Container Settings used by Datanode-->
+  <property>
+    <name>ozone.container.cache.size</name>
+    <value>1024</value>
+    <tag>PERFORMANCE, CONTAINER, STORAGE</tag>
+    <description>Open containers are cached on the datanode side. We maintain
+      an LRU cache of recently used containers. This setting controls the
+      size of that cache.
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ipc</name>
+    <value>9859</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>The IPC port number of the container.</description>
+  </property>
+  <property>
+    <name>dfs.container.ipc.random.port</name>
+    <value>false</value>
+    <tag>OZONE, DEBUG, CONTAINER</tag>
+    <description>Allocates a random free port for the Ozone container. This is
+      used only while running unit tests.
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.datanode.storage.dir</name>
+    <value/>
+    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS</tag>
+    <description>This directory is used for storing Ratis metadata, such as logs.
+      If this is not set, the default metadata directories are used and a warning
+      is logged. Ideally, this should be mapped to a fast disk like an SSD.
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.enabled</name>
+    <value>false</value>
+    <tag>OZONE, MANAGEMENT, PIPELINE, RATIS</tag>
+    <description>Ozone supports different kinds of replication pipelines. Ratis
+      is one of the replication pipelines supported by Ozone.
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.ipc</name>
+    <value>9858</value>
+    <tag>OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT</tag>
+    <description>The Ratis IPC port number of the container.</description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.ipc.random.port</name>
+    <value>false</value>
+    <tag>OZONE, DEBUG</tag>
+    <description>Allocates a random free Ratis port for the Ozone container.
+      This is used only while running unit tests.
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.rpc.type</name>
+    <value>GRPC</value>
+    <tag>OZONE, RATIS, MANAGEMENT</tag>
+    <description>Ratis supports different transports such as Netty, gRPC, and
+      Hadoop RPC. This picks one of them for this cluster.
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.num.write.chunk.threads</name>
+    <value>60</value>
+    <tag>OZONE, RATIS, PERFORMANCE</tag>
+    <description>Maximum number of threads in the thread pool that Ratis
+      will use for writing chunks (60 by default).
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.segment.size</name>
+    <value>1073741824</value>
+    <tag>OZONE, RATIS, PERFORMANCE</tag>
+    <description>The size of the raft segment used by Apache Ratis on datanodes.
+      (1 GB by default)
+    </description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.segment.preallocated.size</name>
+    <value>134217728</value>
+    <tag>OZONE, RATIS, PERFORMANCE</tag>
+    <description>The size of the buffer that is preallocated for the raft segment
+      used by Apache Ratis on datanodes. (128 MB by default)
+    </description>
+  </property>
+  <property>
+    <name>ozone.container.report.interval</name>
+    <value>60000ms</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>Time interval at which the datanode sends container reports.
+      Each datanode periodically sends a container report upon receiving
+      sendContainerReport from SCM. The unit can be specified with a
+      suffix (ns, ms, s, m, h, d).</description>
+  </property>
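
Several keys in this file (including the one above) are duration-valued. As a generic sketch of how such values are usually read, assuming only Hadoop's standard Configuration API and the key name from this file:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class ReportIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "60000ms" and "1m" both parse; 60000 ms is used when the key is absent.
    long intervalMs = conf.getTimeDuration(
        "ozone.container.report.interval", 60_000L, TimeUnit.MILLISECONDS);
    System.out.println("Container report interval: " + intervalMs + " ms");
  }
}
```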
+  <!--Ozone Settings-->
+  <property>
+    <name>ozone.administrators</name>
+    <value/>
+    <tag>OZONE, SECURITY</tag>
+    <description>Comma-delimited list of Ozone administrator users.
+      If not set, only the user who launches an Ozone service will be the admin
+      user. This property must be set if Ozone services are started by different
+      users; otherwise, the RPC layer will reject calls from servers started by
+      users not in the list.
+    </description>
+  </property>
+  <property>
+    <name>ozone.block.deleting.container.limit.per.interval</name>
+    <value>10</value>
+    <tag>OZONE, PERFORMANCE, SCM</tag>
+    <description>The maximum number of containers scanned by the block deleting
+      service per time interval. The block deleting service spawns a thread to
+      handle block deletions in a container. This property is used to throttle
+      the number of threads spawned for block deletions.
+    </description>
+  </property>
+  <property>
+    <name>ozone.block.deleting.limit.per.task</name>
+    <value>1000</value>
+    <tag>OZONE, PERFORMANCE, SCM</tag>
+    <description>The maximum number of blocks to be deleted by the block deleting
+      service per time interval. This property is used to throttle the actual
+      number of block deletions on a datanode per container.
+    </description>
+  </property>
+  <property>
+    <name>ozone.block.deleting.service.interval</name>
+    <value>1m</value>
+    <tag>OZONE, PERFORMANCE, SCM</tag>
+    <description>Time interval of the block deleting service.
+      The block deleting service runs on each datanode periodically and
+      deletes blocks queued for deletion. The unit can be specified with a
+      suffix (ns, ms, s, m, h, d).
+    </description>
+  </property>
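
Taken together, the three block-deleting properties above bound the per-datanode deletion throughput: with the defaults, at most roughly 10 containers × 1000 blocks = 10,000 block deletions can be issued per one-minute interval, an upper bound that assumes every scanned container has a full batch queued.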
+  <property>
+    <name>ozone.block.deleting.service.timeout</name>
+    <value>300000ms</value>
+    <tag>OZONE, PERFORMANCE, SCM</tag>
+    <description>The timeout value of the block deleting service. If this is set
+      greater than 0, the service will stop waiting for block deletion to
+      complete after this time. If timeouts occur for a large proportion of
+      block deletions, this should be increased together with
+      ozone.block.deleting.limit.per.task. This setting supports multiple time
+      unit suffixes as described in dfs.heartbeat.interval. If no suffix is
+      specified, milliseconds are assumed.
+    </description>
+  </property>
+  <property>
+    <name>ozone.client.connection.timeout</name>
+    <value>5000ms</value>
+    <tag>OZONE, PERFORMANCE, CLIENT</tag>
+    <description>Connection timeout for Ozone client in milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>ozone.client.protocol</name>
+    <value>org.apache.hadoop.ozone.client.rpc.RpcClient</value>
+    <tag>OZONE, CLIENT, MANAGEMENT</tag>
+    <description>Protocol class used by the client to connect to the Ozone
+      cluster.
+      The built-in implementations include:
+      org.apache.hadoop.ozone.client.rpc.RpcClient for RPC
+      org.apache.hadoop.ozone.client.rest.RestClient for REST
+      The default is RpcClient. Please do not change this unless you have a
+      very good understanding of what you are doing.
+    </description>
+  </property>
+  <property>
+    <name>ozone.client.socket.timeout</name>
+    <value>5000ms</value>
+    <tag>OZONE, CLIENT</tag>
+    <description>Socket timeout for the Ozone client. The unit can be specified
+      with a suffix (ns, ms, s, m, h, d).</description>
+  </property>
+  <property>
+    <name>ozone.enabled</name>
+    <value>false</value>
+    <tag>OZONE, REQUIRED</tag>
+    <description>
+      Controls whether the Ozone object storage service is enabled.
+      Set to true to enable Ozone.
+      Set to false to disable Ozone.
+      Unless this value is set to true, Ozone services will not be started in
+      the cluster.
+
+      Please note: by default, Ozone is disabled on a Hadoop cluster.
+    </description>
+  </property>
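
As an illustration of the guard this flag implies, a minimal sketch that checks it through Hadoop's standard Configuration API; only the key name comes from this file, while the resource loading and messages are illustrative:

```java
import org.apache.hadoop.conf.Configuration;

public class OzoneEnabledCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.addResource("ozone-site.xml"); // site-specific overrides, per the header comment
    if (!conf.getBoolean("ozone.enabled", false)) {
      System.err.println("ozone.enabled is false; Ozone services will not be started.");
      return;
    }
    System.out.println("Ozone is enabled; services may start.");
  }
}
```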
+  <property>
+    <name>ozone.handler.type</name>
+    <value>distributed</value>
+    <tag>OZONE, REST</tag>
+    <description>
+      Tells Ozone which storage handler to use. The possible values are:
+      distributed - The Ozone distributed storage handler, which speaks to
+      KSM/SCM on the backend and provides REST services to clients.
+      local - Local storage handler, strictly for testing (to be removed).
+    </description>
+  </property>
+  <property>
+    <name>ozone.key.deleting.limit.per.task</name>
+    <value>1000</value>
+    <tag>KSM, PERFORMANCE</tag>
+    <description>
+      The maximum number of keys scanned by the key deleting service
+      per time interval in KSM. Those keys are then purged from metadata, and
+      deletion transactions are generated in SCM for the next round of
+      asynchronous deletion between SCM and the datanodes.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.address</name>
+    <value/>
+    <tag>KSM, REQUIRED</tag>
+    <description>
+      The address of the Ozone KSM service. This allows clients to discover
+      the KSM's address.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.group.rights</name>
+    <value>READ_WRITE</value>
+    <tag>KSM, SECURITY</tag>
+    <description>
+      Default group permissions in Ozone KSM.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.handler.count.key</name>
+    <value>20</value>
+    <tag>KSM, PERFORMANCE</tag>
+    <description>
+      The number of RPC handler threads for KSM service endpoints.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.http-address</name>
+    <value>0.0.0.0:9874</value>
+    <tag>KSM, MANAGEMENT</tag>
+    <description>
+      The address and the base port on which the KSM web UI will listen.
+
+      If the port is 0, then the server will start on a free port. However, it
+      is best to specify a well-known port, so it is easy to connect and see
+      the KSM management UI.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.http-bind-host</name>
+    <value>0.0.0.0</value>
+    <tag>KSM, MANAGEMENT</tag>
+    <description>
+      The actual address the KSM web server will bind to. If this optional
+      address is set, it overrides only the hostname portion of
+      ozone.ksm.http-address.
+    </description>
+  </property>
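
For example, setting ozone.ksm.http-address to ksm.example.com:9874 while leaving ozone.ksm.http-bind-host at 0.0.0.0 makes the KSM web server listen on all interfaces yet advertise ksm.example.com:9874 to clients; the hostname here is purely illustrative.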
+  <property>
+    <name>ozone.ksm.http.enabled</name>
+    <value>true</value>
+    <tag>KSM, MANAGEMENT</tag>
+    <description>
+      Property to enable or disable KSM web user interface.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.https-address</name>
+    <value>0.0.0.0:9875</value>
+    <tag>KSM, MANAGEMENT, SECURITY</tag>
+    <description>
+      The address and the base port on which the KSM web UI will listen
+      using HTTPS.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.https-bind-host</name>
+    <value>0.0.0.0</value>
+    <tag>KSM, MANAGEMENT, SECURITY</tag>
+    <description>
+      The actual address the KSM web server will bind to using HTTPS.
+      If this optional address is set, it overrides only the hostname portion of
+      ozone.ksm.https-address.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.keytab.file</name>
+    <value/>
+    <tag>KSM, SECURITY</tag>
+    <description>
+      The keytab file for Kerberos authentication in KSM.
+    </description>
+  </property>
+  <property>
+    <name>ozone.ksm.db.cache.size.mb</name>
+    <value>128</value>
+    <tag>KSM, PERFORMANCE</tag>
+    <description>
+      The size of the KSM DB cache in MB that is used for caching files.
+      This value is set to an abnormally low value in the default configuration
+      in order to make unit testing easy. Generally, this value should be set to