Merge remote-tracking branch 'origin/master' into HDDS-2939
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 3a9133f..81d174c 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2869,4 +2869,36 @@
     <description>Ozone client gets the latest version location.
     </description>
   </property>
+
+  <property>
+    <name>ozone.om.metadata.layout</name>
+    <tag>OZONE, OM</tag>
+    <value>SIMPLE</value>
+    <description>
+      Defines the metadata layout of file system paths. If it is configured
+      as PREFIX in combination with ozone.om.enable.filesystem.paths set to
+      true, then atomic rename and delete of any directory at any level in
+      the namespace is supported. Defaults to SIMPLE. Supported values:
+      SIMPLE and PREFIX.
+    </description>
+  </property>
+  <property>
+    <name>ozone.directory.deleting.service.interval</name>
+    <value>1m</value>
+    <tag>OZONE, PERFORMANCE, OM</tag>
+    <description>Time interval of the directory deleting service. It runs on
+      the OM periodically and cleans up orphan directories and their
+      sub-trees. For every orphan directory it deletes the sub-path tree
+      structure (dirs/files) and sends the sub-files to the
+      KeyDeletingService, which deletes their blocks. The unit can be
+      defined with a postfix (ns,ms,s,m,h,d).
+    </description>
+  </property>
+  <property>
+    <name>ozone.path.deleting.limit.per.task</name>
+    <value>10000</value>
+    <tag>OZONE, PERFORMANCE, OM</tag>
+    <description>The maximum number of paths (dirs/files) to be deleted by
+      the directory deleting service per time interval.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-hdds/docs/content/design/namespace-support.md b/hadoop-hdds/docs/content/design/namespace-support.md
index 5dbd289..c2a9d12 100644
--- a/hadoop-hdds/docs/content/design/namespace-support.md
+++ b/hadoop-hdds/docs/content/design/namespace-support.md
@@ -1,10 +1,10 @@
 ---
-title: Ozone FS namespace
+title: Ozone FS namespace / prefix table
 summary: Use additional prefix table for indexed data retrieval
-date: 2020-01-20
+date: 2021-04-12
 jira: HDDS-2939
-status: implementing
-author: Supratim Deka, Anu Engineer
+status: implemented
+author: Supratim Deka, Anu Engineer, Rakesh Radhakrishnan
 ---
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,8 +22,9 @@
 
 # Abstract
 
- Flat namespace (like key -> key info) is not efficient for retrieveing directories. (Large segments should be scanned, the whole sub-hierarchy)
+ A flat namespace (key -> key info) is not efficient for listing, deleting or renaming directories: large segments, covering the whole sub-hierarchy, must be scanned. To make delete and rename fast and atomic (and listing faster), the key table is split into a prefix table and a key table.
 
 # Link
 
- * https://issues.apache.org/jira/secure/attachment/12991926/Ozone%20FS%20Namespace%20Proposal%20v1.0.docx
+ * [Design doc](https://issues.apache.org/jira/secure/attachment/12991926/Ozone%20FS%20Namespace%20Proposal%20v1.0.docx)
+ * [Quick overview](https://issues.apache.org/jira/secure/attachment/13023399/OzoneFS%20Optimizations_DesignOverview_%20HDDS-2939.pdf)
diff --git a/hadoop-hdds/docs/content/feature/PrefixFSO-Delete.png b/hadoop-hdds/docs/content/feature/PrefixFSO-Delete.png
new file mode 100644
index 0000000..ec697ce
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/PrefixFSO-Delete.png
Binary files differ
diff --git a/hadoop-hdds/docs/content/feature/PrefixFSO-Format.png b/hadoop-hdds/docs/content/feature/PrefixFSO-Format.png
new file mode 100644
index 0000000..37707f4
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/PrefixFSO-Format.png
Binary files differ
diff --git a/hadoop-hdds/docs/content/feature/PrefixFSO-Rename.png b/hadoop-hdds/docs/content/feature/PrefixFSO-Rename.png
new file mode 100644
index 0000000..c401c10
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/PrefixFSO-Rename.png
Binary files differ
diff --git a/hadoop-hdds/docs/content/feature/PrefixFSO.md b/hadoop-hdds/docs/content/feature/PrefixFSO.md
new file mode 100644
index 0000000..130f284
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/PrefixFSO.md
@@ -0,0 +1,75 @@
+---
+title: "Prefix based FileSystem Optimization"
+weight: 2
+menu:
+   main:
+      parent: Features
+summary: Supports atomic rename and delete operations.
+---
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+The prefix based FileSystem optimization feature supports atomic rename and
+delete of any directory at any level in the namespace, and performs these
+operations in deterministic, constant time.
+
+Note: This feature works only when `ozone.om.enable.filesystem.paths` is
+enabled, which means that Hadoop Compatible File System semantics are
+favored over S3 compatibility. Some irregular S3 key names may be rejected
+or normalized.
+
+Turning this feature ON is strongly recommended when Ozone buckets are
+mainly used via Hadoop compatible interfaces, especially with a high number
+of files in deep directory hierarchies.
+
+## OzoneManager Metadata layout format
+OzoneManager supports two metadata layout formats - simple and prefix.
+
+Simple is the existing OM metadata format, which stores each key entry with
+its full path name. In the prefix based optimization, the OM metadata format
+stores intermediate directories in `DirectoryTable` and files in `FileTable`,
+as shown in the picture below. The key of a table entry is the name of a
+directory or a file prefixed by the unique identifier of its parent
+directory: `<parent unique-id>/<filename>`.
+
+![FSO Format](PrefixFSO-Format.png)
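+
+For example (the object IDs are illustrative), creating the key `a/b/file1`
+in a bucket whose objectID is 512 would result in entries such as:
+
+| Table          | DB key     | Entry |
+|----------------|------------|-------|
+| DirectoryTable | 512/a      | a     |
+| DirectoryTable | 1024/b     | b     |
+| FileTable      | 1025/file1 | file1 |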
+
+
+### Directory delete operation with prefix layout: ###
+The following picture describes the OM metadata changes while performing a
+delete operation on a directory.
+![FSO Delete](PrefixFSO-Delete.png)
+
+### Directory rename operation with prefix layout: ###
+The following picture describes the OM metadata changes while performing a
+rename operation on a directory.
+![FSO Rename](PrefixFSO-Rename.png)
+
+## Configuration
+By default the feature is disabled. It can be enabled with the following
+settings in `ozone-site.xml`:
+
+```XML
+<property>
+   <name>ozone.om.enable.filesystem.paths</name>
+   <value>true</value>
+</property>
+<property>
+   <name>ozone.om.metadata.layout</name>
+   <value>PREFIX</value>
+</property>
+```
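+
+Once enabled, the optimized directory operations are exercised through the
+regular Ozone client API. A minimal sketch (volume, bucket and key names are
+illustrative):
+
+```java
+OzoneClient client = OzoneClientFactory.getRpcClient(conf);
+OzoneBucket bucket = client.getObjectStore()
+    .getVolume("vol1").getBucket("fso-bucket");
+
+// atomically delete "dir1" and its entire sub-tree
+bucket.deleteDirectory("dir1", true);
+
+// iterate keys under prefix "a"; with the prefix layout this walks the
+// tree depth-first, so the result is not necessarily sorted
+Iterator<? extends OzoneKey> it = bucket.listKeys("a", null);
+while (it.hasNext()) {
+  System.out.println(it.next().getName());
+}
+```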
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index c96ce2d..c68a14f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -688,8 +688,10 @@
     pipelineFactory.shutdown();
     lock.writeLock().lock();
     try {
-      pipelineStore.close();
-      pipelineStore = null;
+      if (pipelineStore != null) {
+        pipelineStore.close();
+        pipelineStore = null;
+      }
     } catch (Exception ex) {
       LOG.error("Pipeline  store close failed", ex);
     } finally {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index d715c82..fb00bc5 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -21,6 +21,7 @@
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -28,6 +29,7 @@
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
@@ -35,7 +37,9 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.WithMetadata;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
@@ -43,13 +47,16 @@
 
 import java.io.IOException;
 import java.time.Instant;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Stack;
 import java.util.NoSuchElementException;
 
 import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
  * A class that encapsulates OzoneBucket.
@@ -555,6 +562,10 @@
    */
   public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
       String prevKey) throws IOException {
+
+    if (OzoneFSUtils.isFSOptimizedBucket(getMetadata())) {
+      return new KeyIteratorWithFSO(keyPrefix, prevKey);
+    }
     return new KeyIterator(keyPrefix, prevKey);
   }
 
@@ -564,7 +575,21 @@
    * @throws IOException
    */
   public void deleteKey(String key) throws IOException {
-    proxy.deleteKey(volumeName, name, key);
+    proxy.deleteKey(volumeName, name, key, false);
+  }
+
+  /**
+   * Ozone FS API to delete a directory. Sub-directories will be deleted if
+   * the recursive flag is true; otherwise the delete is non-recursive.
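+   * <p>
+   * A minimal usage sketch (the key name is illustrative):
+   * <pre>
+   *   // delete "dir1" and its entire sub-tree atomically
+   *   bucket.deleteDirectory("dir1", true);
+   * </pre>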
+   *
+   * @param key       Name of the key to be deleted.
+   * @param recursive if true, recursively delete all sub-path keys;
+   *                  otherwise non-recursive
+   * @throws IOException
+   */
+  public void deleteDirectory(String key, boolean recursive)
+      throws IOException {
+    proxy.deleteKey(volumeName, name, key, recursive);
   }
 
   /**
@@ -817,6 +842,13 @@
     private Iterator<OzoneKey> currentIterator;
     private OzoneKey currentValue;
 
+    String getKeyPrefix() {
+      return keyPrefix;
+    }
+
+    void setKeyPrefix(String keyPrefixPath) {
+      keyPrefix = keyPrefixPath;
+    }
 
     /**
      * Creates an Iterator to iterate over all keys after prevKey in the bucket.
@@ -825,7 +857,7 @@
      * @param keyPrefix
      */
     KeyIterator(String keyPrefix, String prevKey) throws IOException{
-      this.keyPrefix = keyPrefix;
+      setKeyPrefix(keyPrefix);
       this.currentValue = null;
       this.currentIterator = getNextListOfKeys(prevKey).iterator();
     }
@@ -857,10 +889,285 @@
      * @param prevKey
      * @return {@code List<OzoneKey>}
      */
-    private List<OzoneKey> getNextListOfKeys(String prevKey) throws
+    List<OzoneKey> getNextListOfKeys(String prevKey) throws
         IOException {
       return proxy.listKeys(volumeName, name, keyPrefix, prevKey,
           listCacheSize);
     }
   }
+
+
+  /**
+   * An Iterator to iterate over {@link OzoneKey} list.
+   *
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |           |                       |
+   *     b1          b2                      b3
+   *   -----       --------               ----------
+   *   |    |      |    |   |             |    |     |
+   *  c1   c2     d1   d2  d3             e1   e2   e3
+   *                   |                  |
+   *               --------               |
+   *              |        |              |
+   *           d21.txt   d22.txt        e11.txt
+   *
+   * Say keyPrefix="a" and prevKey=""; the iterator then performs a
+   * depth-first traversal and visits the nodes via getChildren as follows:
+   * 1. getChildren("a/")  2. getChildren("a/b1")  3. getChildren("a/b1/c1")
+   * 4. getChildren("a/b1/c2")  5. getChildren("a/b2/d1")
+   * 6. getChildren("a/b2/d2")  7. getChildren("a/b2/d3")
+   * 8. getChildren("a/b3/e1")  9. getChildren("a/b3/e2")
+   * 10. getChildren("a/b3/e3")
+   *
+   * Note: the returned list of keys is not guaranteed to be sorted.
+   */
+  private class KeyIteratorWithFSO extends KeyIterator {
+
+    private Stack<String> stack;
+    private List<OzoneKey> pendingItemsToBeBatched;
+    private boolean addedKeyPrefix;
+
+    /**
+     * Creates an Iterator to iterate over all keys after prevKey in the bucket.
+     * If prevKey is null it iterates from the first key in the bucket.
+     * The returned keys match key prefix.
+     *
+     * @param keyPrefix
+     * @param prevKey
+     */
+    KeyIteratorWithFSO(String keyPrefix, String prevKey) throws IOException {
+      super(keyPrefix, prevKey);
+    }
+
+    @Override
+    List<OzoneKey> getNextListOfKeys(String prevKey) throws IOException {
+      if (stack == null) {
+        stack = new Stack<>();
+        pendingItemsToBeBatched = new ArrayList<>();
+      }
+
+      // normalize paths
+      if (!addedKeyPrefix) {
+        prevKey = OmUtils.normalizeKey(prevKey, true);
+        String keyPrefixName = "";
+        if (StringUtils.isNotBlank(getKeyPrefix())) {
+          keyPrefixName = OmUtils.normalizeKey(getKeyPrefix(), true);
+        }
+        setKeyPrefix(keyPrefixName);
+      }
+
+      // Get immediate children
+      List<OzoneKey> keysResultList = new ArrayList<>();
+      getChildrenKeys(getKeyPrefix(), prevKey, keysResultList);
+
+      // TODO: Back and Forth seek all the files & dirs, starting from
+      //  startKey till keyPrefix.
+
+      return keysResultList;
+    }
+
+    /**
+     * Lists children under the given keyPrefix and startKey path. It makes
+     * recursive #listStatus calls to collect all the sub-keys into
+     * keysResultList.
+     *
+     *                  buck-1
+     *                    |
+     *                    a
+     *                    |
+     *      -----------------------------------
+     *     |           |                       |
+     *     b1          b2                      b3
+     *   -----       --------               ----------
+     *   |    |      |    |   |             |    |     |
+     *  c1   c2     d1   d2  d3             e1   e2   e3
+     *                   |                  |
+     *               --------               |
+     *              |        |              |
+     *           d21.txt   d22.txt        e11.txt
+     *
+     * Say, KeyPrefix = "a" and startKey = null;
+     *
+     * Iteration-1) RPC call proxy#listStatus("a").
+     *              Add b3, b2 and b1 to stack.
+     * Iteration-2) pop b1 and do RPC call proxy#listStatus("b1")
+     *              Add c2, c1 to stack.
+     * Iteration-3) pop c1 and do RPC call proxy#listStatus("c1"). Empty list.
+     * Iteration-4) pop c2 and do RPC call proxy#listStatus("c2"). Empty list.
+     * Iteration-5) pop b2 and do RPC call proxy#listStatus("b2")
+     *              Add d3, d2 and d1 to stack.
+     *              ..........
+     *              ..........
+     * Iteration-n) pop e3 and do RPC call proxy#listStatus("e3")
+     *              Reached end of the FS tree.
+     *
+     * @param keyPrefix
+     * @param startKey
+     * @param keysResultList
+     * @return true if the batch size limit was reached, false otherwise.
+     * @throws IOException
+     */
+    private boolean getChildrenKeys(String keyPrefix, String startKey,
+        List<OzoneKey> keysResultList) throws IOException {
+
+      // listStatus API expects a non-null 'startKey' value
+      startKey = startKey == null ? "" : startKey;
+
+      // 1. Add pending items to the user key resultList
+      if (addAllPendingItemsToResultList(keysResultList)) {
+        // reached limit batch size.
+        return true;
+      }
+
+      // 2. Get immediate children of keyPrefix, starting with startKey
+      List<OzoneFileStatus> statuses = proxy.listStatus(volumeName, name,
+              keyPrefix, false, startKey, listCacheSize);
+
+      // 3. Special case: ListKey expects the keyPrefix element to be present
+      // in the resultList, but only if startKey is blank. If startKey is not
+      // blank then the resultList shouldn't contain the startKey element.
+      // Since the proxy#listStatus API won't return the keyPrefix element in
+      // the resultList, this adds the user-given keyPrefix to the list.
+      addKeyPrefixInfoToResultList(keyPrefix, startKey, keysResultList);
+
+      // 4. Special case: ListKey expects that startKey is not present in the
+      // resultList. Since the proxy#listStatus API returns the startKey
+      // element in its list, this call removes the startKey element.
+      removeStartKeyIfExistsInStatusList(startKey, statuses);
+
+      boolean reachedLimitCacheSize = false;
+      // This dirList is used to store paths elements in left-to-right order.
+      List<String> dirList = new ArrayList<>();
+
+      // 5. Iterate over the statuses list and add each key to the
+      // resultList. Once listCacheSize is reached, the rest of the statuses
+      // are added to pendingItemsToBeBatched.
+      for (int indx = 0; indx < statuses.size(); indx++) {
+        OzoneFileStatus status = statuses.get(indx);
+        OmKeyInfo keyInfo = status.getKeyInfo();
+        String keyName = keyInfo.getKeyName();
+
+        // Add dir to the dirList
+        if (status.isDirectory()) {
+          dirList.add(keyInfo.getKeyName());
+          // add trailing slash to represent directory
+          keyName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
+        }
+
+        OzoneKey ozoneKey = new OzoneKey(keyInfo.getVolumeName(),
+                keyInfo.getBucketName(), keyName,
+                keyInfo.getDataSize(), keyInfo.getCreationTime(),
+                keyInfo.getModificationTime(),
+                keyInfo.getReplicationConfig());
+
+        // 5.1) Add to the resultList until it reaches the batch size limit.
+        // Once the limit is reached, add the rest of the items to
+        // pendingItemsToBeBatched; they are picked up in the next batch
+        // iteration.
+        if (!reachedLimitCacheSize && listCacheSize > keysResultList.size()) {
+          keysResultList.add(ozoneKey);
+          reachedLimitCacheSize = listCacheSize <= keysResultList.size();
+        } else {
+          pendingItemsToBeBatched.add(ozoneKey);
+        }
+      }
+
+      // 6. Push elements in reverse order so that the FS tree traversal will
+      // occur in left-to-right fashion.
+      for (int indx = dirList.size() - 1; indx >= 0; indx--) {
+        String dirPathComponent = dirList.get(indx);
+        stack.push(dirPathComponent);
+      }
+
+      if (reachedLimitCacheSize) {
+        return true;
+      }
+
+      // 7. Pop an element and seek its sub-child path(s), i.e. move the
+      // seek pointer to the next level (depth) in the FS tree.
+      while (!stack.isEmpty()) {
+        keyPrefix = stack.pop();
+        if (getChildrenKeys(keyPrefix, "", keysResultList)) {
+          // reached limit batch size.
+          return true;
+        }
+      }
+
+      return false;
+    }
+
+    private void removeStartKeyIfExistsInStatusList(String startKey,
+        List<OzoneFileStatus> statuses) {
+
+      if (StringUtils.isNotBlank(startKey) && !statuses.isEmpty()) {
+        String startKeyPath = startKey;
+        if (startKey.endsWith(OZONE_URI_DELIMITER)) {
+          startKeyPath = OzoneFSUtils.removeTrailingSlashIfNeeded(startKey);
+        }
+        if (StringUtils.equals(statuses.get(0).getKeyInfo().getKeyName(),
+                startKeyPath)) {
+          // remove the duplicateKey from the list.
+          statuses.remove(0);
+        }
+      }
+    }
+
+    private boolean addAllPendingItemsToResultList(List<OzoneKey> keys) {
+
+      Iterator<OzoneKey> ozoneKeyItr = pendingItemsToBeBatched.iterator();
+      while (ozoneKeyItr.hasNext()) {
+        if (listCacheSize <= keys.size()) {
+          // reached limit batch size.
+          return true;
+        }
+        keys.add(ozoneKeyItr.next());
+        ozoneKeyItr.remove();
+      }
+      return false;
+    }
+
+    private void addKeyPrefixInfoToResultList(String keyPrefix,
+        String startKey, List<OzoneKey> keysResultList) throws IOException {
+
+      if (addedKeyPrefix) {
+        return;
+      }
+
+      // setting flag to true.
+      addedKeyPrefix = true;
+
+      // keyPrefix need not be added in two cases:
+      // case-1) keyPrefix is null or empty
+      // case-2) startKey is not blank
+      if (StringUtils.isBlank(keyPrefix) || StringUtils.isNotBlank(startKey)) {
+        return;
+      }
+
+      // TODO: HDDS-4859 will fix the case where startKey not started with
+      //  keyPrefix.
+
+      OzoneFileStatus status = proxy.getOzoneFileStatus(volumeName, name,
+          keyPrefix);
+
+      if (status != null) {
+        OmKeyInfo keyInfo = status.getKeyInfo();
+        String keyName = keyInfo.getKeyName();
+        if (status.isDirectory()) {
+          // add trailing slash to represent directory
+          keyName =
+              OzoneFSUtils.addTrailingSlashIfNeeded(keyInfo.getKeyName());
+        }
+
+        OzoneKey ozoneKey = new OzoneKey(keyInfo.getVolumeName(),
+            keyInfo.getBucketName(), keyName,
+            keyInfo.getDataSize(), keyInfo.getCreationTime(),
+            keyInfo.getModificationTime(),
+            keyInfo.getReplicationConfig());
+        keysResultList.add(ozoneKey);
+      }
+    }
+
+  }
 }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 48a27aa..2addbc1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -90,6 +90,8 @@
   private boolean isException;
   private final BlockOutputStreamEntryPool blockOutputStreamEntryPool;
 
+  private long clientID;
+
   /**
    * A constructor for testing purpose only.
    */
@@ -125,6 +127,11 @@
     return retryCount;
   }
 
+  @VisibleForTesting
+  public long getClientID() {
+    return clientID;
+  }
+
   @SuppressWarnings({"parameternumber", "squid:S00107"})
   public KeyOutputStream(
       OzoneClientConfig config,
@@ -156,6 +163,7 @@
     this.retryCount = 0;
     this.isException = false;
     this.writeOffset = 0;
+    this.clientID = handler.getId();
   }
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 40aa684..83293d6 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -309,9 +309,12 @@
    * @param volumeName Name of the Volume
    * @param bucketName Name of the Bucket
    * @param keyName Name of the Key
+   * @param recursive if true, recursively delete all sub-path keys;
+   *                  otherwise non-recursive
    * @throws IOException
    */
-  void deleteKey(String volumeName, String bucketName, String keyName)
+  void deleteKey(String volumeName, String bucketName, String keyName,
+                 boolean recursive)
       throws IOException;
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index a7707f4..b49d05d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -829,7 +829,7 @@
 
   @Override
   public void deleteKey(
-      String volumeName, String bucketName, String keyName)
+      String volumeName, String bucketName, String keyName, boolean recursive)
       throws IOException {
     verifyVolumeName(volumeName);
     verifyBucketName(bucketName);
@@ -838,6 +838,7 @@
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
+        .setRecursive(recursive)
         .build();
     ozoneManagerClient.deleteKey(keyArgs);
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 577146d..f524cfb 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -290,6 +290,7 @@
     case RecoverTrash:
     case DeleteOpenKeys:
     case RevokeS3Secret:
+    case PurgePaths:
       return false;
     default:
       LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 2eead63..c0ada6a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -244,4 +244,29 @@
       "ozone.fs.trash.checkpoint.interval";
 
   public static final long  OZONE_FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT = 0;
+
+  /**
+   * Defines the metadata layout of file system paths. If it is configured
+   * as PREFIX in combination with ozone.om.enable.filesystem.paths set to
+   * true, then atomic rename and delete of any directory at any level in
+   * the namespace is supported. Defaults to SIMPLE. Supported values:
+   * SIMPLE and PREFIX.
+   */
+  public static final String OZONE_OM_METADATA_LAYOUT =
+          "ozone.om.metadata.layout";
+  public static final String OZONE_OM_METADATA_LAYOUT_DEFAULT = "SIMPLE";
+
+  public static final String OZONE_OM_METADATA_LAYOUT_PREFIX = "PREFIX";
+
+  /**
+   * Configuration properties for Directory Deleting Service.
+   */
+  public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL =
+      "ozone.directory.deleting.service.interval";
+  public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT
+      = "60s";
+
+  public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK =
+      "ozone.path.deleting.limit.per.task";
+  public static final int OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT = 10000;
+
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index b676bca..bba97cd 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -233,7 +233,9 @@
 
     QUOTA_EXCEEDED,
 
-    QUOTA_ERROR
+    QUOTA_ERROR,
+
+    DIRECTORY_NOT_EMPTY
 
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
new file mode 100644
index 0000000..3d5d6a5
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * This class represents the directory information by keeping each component
+ * in the user given path and a pointer to its parent directory element in the
+ * path. Also, it stores directory node related metdata details.
+ */
+public class OmDirectoryInfo extends WithParentObjectId {
+  private String name; // directory name
+
+  private long creationTime;
+  private long modificationTime;
+
+  private List<OzoneAcl> acls;
+
+  public OmDirectoryInfo(Builder builder) {
+    this.name = builder.name;
+    this.acls = builder.acls;
+    this.metadata = builder.metadata;
+    this.objectID = builder.objectID;
+    this.updateID = builder.updateID;
+    this.parentObjectID = builder.parentObjectID;
+    this.creationTime = builder.creationTime;
+    this.modificationTime = builder.modificationTime;
+  }
+
+  /**
+   * Returns a new builder that builds an OmDirectoryInfo.
+   *
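+   * A minimal usage sketch (the name and IDs are illustrative):
+   * <pre>
+   *   OmDirectoryInfo dir = OmDirectoryInfo.newBuilder()
+   *       .setName("dir1")
+   *       .setParentObjectID(1024L)
+   *       .setObjectID(1025L)
+   *       .setCreationTime(System.currentTimeMillis())
+   *       .build();
+   * </pre>
+   *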
+   * @return Builder
+   */
+  public static OmDirectoryInfo.Builder newBuilder() {
+    return new OmDirectoryInfo.Builder();
+  }
+
+  /**
+   * Builder for Directory Info.
+   */
+  public static class Builder {
+    private long parentObjectID; // pointer to parent directory
+
+    private long objectID;
+    private long updateID;
+
+    private String name;
+
+    private long creationTime;
+    private long modificationTime;
+
+    private List<OzoneAcl> acls;
+    private Map<String, String> metadata;
+
+    public Builder() {
+      //Default values
+      this.acls = new LinkedList<>();
+      this.metadata = new HashMap<>();
+    }
+
+    public Builder setParentObjectID(long parentObjectId) {
+      this.parentObjectID = parentObjectId;
+      return this;
+    }
+
+    public Builder setObjectID(long objectId) {
+      this.objectID = objectId;
+      return this;
+    }
+
+    public Builder setUpdateID(long updateId) {
+      this.updateID = updateId;
+      return this;
+    }
+
+    public Builder setName(String dirName) {
+      this.name = dirName;
+      return this;
+    }
+
+    public Builder setCreationTime(long newCreationTime) {
+      this.creationTime = newCreationTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long newModificationTime) {
+      this.modificationTime = newModificationTime;
+      return this;
+    }
+
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      if (listOfAcls != null) {
+        this.acls.addAll(listOfAcls);
+      }
+      return this;
+    }
+
+    public Builder addAcl(OzoneAcl ozoneAcl) {
+      if (ozoneAcl != null) {
+        this.acls.add(ozoneAcl);
+      }
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      metadata.put(key, value);
+      return this;
+    }
+
+    public Builder addAllMetadata(Map<String, String> additionalMetadata) {
+      if (additionalMetadata != null) {
+        metadata.putAll(additionalMetadata);
+      }
+      return this;
+    }
+
+    public OmDirectoryInfo build() {
+      return new OmDirectoryInfo(this);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getPath() + ":" + getObjectID();
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
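+  /**
+   * Returns the DB key of this directory, {@code <parentObjectID>/<name>},
+   * e.g. "1024/dir1" (the ID is illustrative).
+   */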
+  public String getPath() {
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Creates DirectoryInfo protobuf from OmDirectoryInfo.
+   */
+  public OzoneManagerProtocolProtos.DirectoryInfo getProtobuf() {
+    OzoneManagerProtocolProtos.DirectoryInfo.Builder pib =
+            OzoneManagerProtocolProtos.DirectoryInfo.newBuilder().setName(name)
+                    .setCreationTime(creationTime)
+                    .setModificationTime(modificationTime)
+                    .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+                    .setObjectID(objectID)
+                    .setUpdateID(updateID)
+                    .setParentID(parentObjectID);
+    if (acls != null) {
+      pib.addAllAcls(OzoneAclUtil.toProtobuf(acls));
+    }
+    return pib.build();
+  }
+
+  /**
+   * Parses DirectoryInfo protobuf and creates OmDirectoryInfo.
+   * @param dirInfo
+   * @return instance of OmDirectoryInfo
+   */
+  public static OmDirectoryInfo getFromProtobuf(
+          OzoneManagerProtocolProtos.DirectoryInfo dirInfo) {
+    OmDirectoryInfo.Builder opib = OmDirectoryInfo.newBuilder()
+            .setName(dirInfo.getName())
+            .setCreationTime(dirInfo.getCreationTime())
+            .setModificationTime(dirInfo.getModificationTime())
+            .setAcls(OzoneAclUtil.fromProtobuf(dirInfo.getAclsList()));
+    if (dirInfo.getMetadataList() != null) {
+      opib.addAllMetadata(KeyValueUtil
+              .getFromProtobuf(dirInfo.getMetadataList()));
+    }
+    if (dirInfo.hasObjectID()) {
+      opib.setObjectID(dirInfo.getObjectID());
+    }
+    if (dirInfo.hasParentID()) {
+      opib.setParentObjectID(dirInfo.getParentID());
+    }
+    if (dirInfo.hasUpdateID()) {
+      opib.setUpdateID(dirInfo.getUpdateID());
+    }
+    return opib.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    OmDirectoryInfo omDirInfo = (OmDirectoryInfo) o;
+    return creationTime == omDirInfo.creationTime &&
+            modificationTime == omDirInfo.modificationTime &&
+            name.equals(omDirInfo.name) &&
+            Objects.equals(metadata, omDirInfo.metadata) &&
+            Objects.equals(acls, omDirInfo.acls) &&
+            objectID == omDirInfo.objectID &&
+            updateID == omDirInfo.updateID &&
+            parentObjectID == omDirInfo.parentObjectID;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(objectID, parentObjectID, name);
+  }
+
+  /**
+   * Return a new copy of the object.
+   */
+  public OmDirectoryInfo copyObject() {
+    OmDirectoryInfo.Builder builder = new Builder()
+            .setName(name)
+            .setCreationTime(creationTime)
+            .setModificationTime(modificationTime)
+            .setParentObjectID(parentObjectID)
+            .setObjectID(objectID)
+            .setUpdateID(updateID);
+
+    acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(),
+            acl.getName(), (BitSet) acl.getAclBitSet().clone(),
+            acl.getAclScope())));
+
+    if (metadata != null) {
+      metadata.forEach((k, v) -> builder.addMetadata(k, v));
+    }
+
+    return builder.build();
+  }
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index 868da8b..7b298d4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -47,6 +47,7 @@
   private boolean sortDatanodesInPipeline;
   private List<OzoneAcl> acls;
   private boolean latestVersionLocation;
+  private boolean recursive;
 
   @SuppressWarnings("parameternumber")
   private OmKeyArgs(String volumeName, String bucketName, String keyName,
@@ -55,7 +56,7 @@
       String uploadID, int partNumber,
       Map<String, String> metadataMap, boolean refreshPipeline,
       List<OzoneAcl> acls, boolean sortDatanode,
-      boolean latestVersionLocation) {
+      boolean latestVersionLocation, boolean recursive) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.keyName = keyName;
@@ -70,6 +71,7 @@
     this.acls = acls;
     this.sortDatanodesInPipeline = sortDatanode;
     this.latestVersionLocation = latestVersionLocation;
+    this.recursive = recursive;
   }
 
   public boolean getIsMultipartKey() {
@@ -140,6 +142,10 @@
     return latestVersionLocation;
   }
 
+  public boolean isRecursive() {
+    return recursive;
+  }
+
   @Override
   public Map<String, String> toAuditMap() {
     Map<String, String> auditMap = new LinkedHashMap<>();
@@ -197,6 +203,7 @@
     private boolean sortDatanodesInPipeline;
     private boolean latestVersionLocation;
     private List<OzoneAcl> acls;
+    private boolean recursive;
 
     public Builder setVolumeName(String volume) {
       this.volumeName = volume;
@@ -273,12 +280,17 @@
       return this;
     }
 
+    public Builder setRecursive(boolean isRecursive) {
+      this.recursive = isRecursive;
+      return this;
+    }
+
     public OmKeyArgs build() {
       return new OmKeyArgs(volumeName, bucketName, keyName, dataSize,
           replicationConfig, locationInfoList, isMultipartKey,
           multipartUploadID,
           multipartUploadPartNumber, metadata, refreshPipeline, acls,
-          sortDatanodesInPipeline, latestVersionLocation);
+          sortDatanodesInPipeline, latestVersionLocation, recursive);
     }
 
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 5fba43c..996e04f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -25,11 +25,13 @@
 import java.util.Map;
 import java.util.Objects;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -43,7 +45,7 @@
  * This is returned from OM to client, and client use class to talk to
  * datanode. Also, this is the metadata written to om.db on server side.
  */
-public final class OmKeyInfo extends WithObjectID {
+public final class OmKeyInfo extends WithParentObjectId {
   private static final Logger LOG = LoggerFactory.getLogger(OmKeyInfo.class);
   private final String volumeName;
   private final String bucketName;
@@ -57,6 +59,13 @@
   private FileEncryptionInfo encInfo;
 
   /**
+   * Represents the leaf node name. This is also used when the key is
+   * created in a FileSystemOptimized (FSO) bucket. For example, if the
+   * user-given keyName is "a/b/key1", then fileName stores "key1".
+   */
+  private String fileName;
+
+  /**
    * ACL Information.
    */
   private List<OzoneAcl> acls;
@@ -84,6 +93,21 @@
     this.updateID = updateID;
   }
 
+  @SuppressWarnings("parameternumber")
+  OmKeyInfo(String volumeName, String bucketName, String keyName,
+            String fileName, List<OmKeyLocationInfoGroup> versions,
+            long dataSize, long creationTime, long modificationTime,
+            ReplicationConfig replicationConfig,
+            Map<String, String> metadata,
+            FileEncryptionInfo encInfo, List<OzoneAcl> acls,
+            long parentObjectID, long objectID, long updateID) {
+    this(volumeName, bucketName, keyName, versions, dataSize,
+            creationTime, modificationTime, replicationConfig, metadata,
+            encInfo, acls, objectID, updateID);
+    this.fileName = fileName;
+    this.parentObjectID = parentObjectID;
+  }
+
   public String getVolumeName() {
     return volumeName;
   }
@@ -112,6 +136,19 @@
     this.dataSize = size;
   }
 
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  public String getFileName() {
+    return fileName;
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
   public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
     return keyLocationVersions.size() == 0? null :
         keyLocationVersions.get(keyLocationVersions.size() - 1);
@@ -294,6 +331,10 @@
     return OzoneAclUtil.setAcl(acls, newAcls);
   }
 
+  public void setParentObjectID(long parentObjectID) {
+    this.parentObjectID = parentObjectID;
+  }
+
   /**
    * Builder of OmKeyInfo.
    */
@@ -312,6 +353,9 @@
     private List<OzoneAcl> acls;
     private long objectID;
     private long updateID;
+    // not persisted to DB. fileName is the last element of the key path.
+    private String fileName;
+    private long parentObjectID;
 
     public Builder() {
       this.metadata = new HashMap<>();
@@ -409,11 +453,22 @@
       return this;
     }
 
+    public Builder setFileName(String keyFileName) {
+      this.fileName = keyFileName;
+      return this;
+    }
+
+    public Builder setParentObjectID(long parentID) {
+      this.parentObjectID = parentID;
+      return this;
+    }
+
     public OmKeyInfo build() {
       return new OmKeyInfo(
-          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, replicationConfig, metadata,
-          encInfo, acls, objectID, updateID);
+              volumeName, bucketName, keyName, fileName,
+              omKeyLocationInfoGroups, dataSize, creationTime,
+              modificationTime, replicationConfig, metadata, encInfo, acls,
+              parentObjectID, objectID, updateID);
     }
   }
 
@@ -426,11 +481,33 @@
   }
 
   /**
+   * For network transmit.
+   *
+   * @param fullKeyName the user given full key name
+   * @return key info with the user given full key name
+   */
+  public KeyInfo getProtobuf(String fullKeyName, int clientVersion) {
+    return getProtobuf(false, fullKeyName, clientVersion);
+  }
+
+  /**
    *
    * @param ignorePipeline true for persist to DB, false for network transmit.
    * @return
    */
   public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) {
+    return getProtobuf(ignorePipeline, null, clientVersion);
+  }
+
+  /**
+   * Gets KeyInfo with the user given key name.
+   *
+   * @param ignorePipeline   ignore pipeline flag
+   * @param fullKeyName user given key name
+   * @return key info object
+   */
+  private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName,
+                              int clientVersion) {
     long latestVersion = keyLocationVersions.size() == 0 ? -1 :
         keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
 
@@ -443,7 +520,6 @@
     KeyInfo.Builder kb = KeyInfo.newBuilder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
-        .setKeyName(keyName)
         .setDataSize(dataSize)
         .setType(replicationConfig.getReplicationType())
         .setFactor(ReplicationConfig.getLegacyFactor(replicationConfig))
@@ -454,7 +530,13 @@
         .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
         .addAllAcls(OzoneAclUtil.toProtobuf(acls))
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentObjectID);
+    if (StringUtils.isNotBlank(fullKeyName)) {
+      kb.setKeyName(fullKeyName);
+    } else {
+      kb.setKeyName(keyName);
+    }
     if (encInfo != null) {
       kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
     }
@@ -492,6 +574,11 @@
     if (keyInfo.hasUpdateID()) {
       builder.setUpdateID(keyInfo.getUpdateID());
     }
+    if (keyInfo.hasParentID()) {
+      builder.setParentObjectID(keyInfo.getParentID());
+    }
+    // not persisted to DB. fileName is derived from the last element of
+    // keyName.
+    builder.setFileName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
     return builder.build();
   }
 
@@ -503,6 +590,8 @@
         ", key='" + keyName + '\'' +
         ", dataSize='" + dataSize + '\'' +
         ", creationTime='" + creationTime + '\'' +
+        ", objectID='" + objectID + '\'' +
+        ", parentID='" + parentObjectID + '\'' +
         ", replication='" + replicationConfig +
         '}';
   }
@@ -528,12 +617,13 @@
         Objects.equals(metadata, omKeyInfo.metadata) &&
         Objects.equals(acls, omKeyInfo.acls) &&
         objectID == omKeyInfo.objectID &&
-        updateID == omKeyInfo.updateID;
+        updateID == omKeyInfo.updateID &&
+        parentObjectID == omKeyInfo.parentObjectID;
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(volumeName, bucketName, keyName);
+    return Objects.hash(volumeName, bucketName, keyName, parentObjectID);
   }
 
   /**
@@ -549,8 +639,10 @@
         .setDataSize(dataSize)
         .setReplicationConfig(replicationConfig)
         .setFileEncryptionInfo(encInfo)
-        .setObjectID(objectID).setUpdateID(updateID);
-
+        .setObjectID(objectID)
+        .setUpdateID(updateID)
+        .setParentObjectID(parentObjectID)
+        .setFileName(fileName);
 
     keyLocationVersions.forEach(keyLocationVersion ->
         builder.addOmKeyLocationInfoGroup(
@@ -578,4 +670,11 @@
   public void clearFileEncryptionInfo() {
     this.encInfo = null;
   }
+
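+  /**
+   * Returns the DB key of this key: {@code <parentObjectID>/<fileName>}
+   * (e.g. "1025/key1", the ID is illustrative) when fileName is set (FSO
+   * buckets), or the full keyName otherwise.
+   */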
+  public String getPath() {
+    if (StringUtils.isBlank(getFileName())) {
+      return getKeyName();
+    }
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getFileName();
+  }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
index 51fd5f6..430772a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
@@ -35,6 +35,30 @@
   private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
 
   /**
+   * A pointer to the parent directory, used for path traversal. ParentID is
+   * used only when the multipart key is created in a FileSystemOptimized
+   * (FSO) bucket.
+   * <p>
+   * For example, if a key "a/b/multiKey1" is created in an FSO bucket, each
+   * path component is assigned an ObjectID and linked to its parent path
+   * component using the parent's objectID.
+   * <p>
+   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
+   * element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *   multiKey1   |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  private long parentID;
+
+  /**
    * Construct OmMultipartKeyInfo object which holds multipart upload
    * information for a key.
    */
@@ -50,6 +74,28 @@
   }
 
   /**
+   * Construct OmMultipartKeyInfo object which holds multipart upload
+   * information for a key.
+   */
+  @SuppressWarnings("parameternumber")
+  public OmMultipartKeyInfo(String id, long creationTime,
+      ReplicationConfig replicationConfig,
+      Map<Integer, PartKeyInfo> list, long objectID, long updateID,
+      long parentObjId) {
+    this(id, creationTime, replicationConfig, list, objectID, updateID);
+    this.parentID = parentObjId;
+  }
+
+  /**
+   * Returns parentID.
+   *
+   * @return long
+   */
+  public long getParentID() {
+    return parentID;
+  }
+
+  /**
    * Returns the uploadID for this multi part upload of a key.
    * @return uploadID
    */
@@ -87,6 +133,7 @@
     private TreeMap<Integer, PartKeyInfo> partKeyInfoList;
     private long objectID;
     private long updateID;
+    private long parentID;
 
     public Builder() {
       this.partKeyInfoList = new TreeMap<>();
@@ -131,9 +178,14 @@
       return this;
     }
 
+    public Builder setParentID(long parentObjId) {
+      this.parentID = parentObjId;
+      return this;
+    }
+
     public OmMultipartKeyInfo build() {
       return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig,
-              partKeyInfoList, objectID, updateID);
+              partKeyInfoList, objectID, updateID, parentID);
     }
   }
 
@@ -156,7 +208,7 @@
     return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(),
         multipartKeyInfo.getCreationTime(), replicationConfig,
         list, multipartKeyInfo.getObjectID(),
-        multipartKeyInfo.getUpdateID());
+        multipartKeyInfo.getUpdateID(), multipartKeyInfo.getParentID());
   }
 
   /**
@@ -170,7 +222,8 @@
         .setType(replicationConfig.getReplicationType())
         .setFactor(ReplicationConfig.getLegacyFactor(replicationConfig))
         .setObjectID(objectID)
-        .setUpdateID(updateID);
+        .setUpdateID(updateID)
+        .setParentID(parentID);
     partKeyInfoList.forEach((key, value) -> builder.addPartKeyInfoList(value));
     return builder.build();
   }
@@ -198,7 +251,7 @@
     // For partKeyInfoList we can do shallow copy here, as the PartKeyInfo is
     // immutable here.
     return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig,
-            new TreeMap<>(partKeyInfoList), objectID, updateID);
+            new TreeMap<>(partKeyInfoList), objectID, updateID, parentID);
   }
 
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index d1491ed..4f2ba36 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -18,10 +18,14 @@
 package org.apache.hadoop.ozone.om.helpers;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.util.StringUtils;
 
+import javax.annotation.Nonnull;
 import java.nio.file.Paths;
+import java.util.Map;
 
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 
 /**
@@ -116,4 +120,122 @@
     }
     return true;
   }
+
+  /**
+   * Returns the leaf node name from the given absolute path. For example,
+   * for the key path '/a/b/c/d/e/file1' it returns the leaf node name
+   * 'file1'.
+   */
+  public static String getFileName(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getFileName();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // failed to convert the key into a path; fall back to the raw keyName
+    return keyName;
+  }
+
+  /**
+   * Verifies whether the childKey is an immediate path under the given
+   * parentKey.
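+   * <p>
+   * For example, isImmediateChild("a", "a/b") is true, while
+   * isImmediateChild("a", "a/b/c") is false.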
+   *
+   * @param parentKey parent key name
+   * @param childKey  child key name
+   * @return true if childKey is an immediate path under the given parentKey
+   */
+  public static boolean isImmediateChild(String parentKey, String childKey) {
+
+    // Empty childKey has no parent, so just returning false.
+    if (org.apache.commons.lang3.StringUtils.isBlank(childKey)) {
+      return false;
+    }
+    java.nio.file.Path parentPath = Paths.get(parentKey);
+    java.nio.file.Path childPath = Paths.get(childKey);
+
+    java.nio.file.Path childParent = childPath.getParent();
+    // Following are the valid parentKey formats:
+    // parentKey="" or parentKey="/" or parentKey="/a" or parentKey="a"
+    // Following are the valid childKey formats:
+    // childKey="/" or childKey="/a/b" or childKey="a/b"
+    if (org.apache.commons.lang3.StringUtils.isBlank(parentKey)) {
+      return childParent == null ||
+              OM_KEY_PREFIX.equals(childParent.toString());
+    }
+
+    return parentPath.equals(childParent);
+  }
+
+  /**
+   * Returns the parent directory path from the given absolute path. For
+   * example, for the key path '/a/b/c/d/e/file1' it returns the parent
+   * directory path '/a/b/c/d/e'.
+   *
+   * @param keyName key name
+   * @return the parent directory path; an empty string if there is none.
+   */
+  public static String getParentDir(@Nonnull String keyName) {
+    java.nio.file.Path parentDir = Paths.get(keyName).getParent();
+    if (parentDir != null) {
+      return parentDir.toString();
+    }
+    }
+    // no parent directory.
+    return "";
+  }
+
+  /**
+   * This function appends the given file name to the given key name path.
+   *
+   * @param keyName key name
+   * @param fileName  file name
+   * @return full path
+   */
+  public static String appendFileNameToKeyPath(String keyName,
+                                               String fileName) {
+    StringBuilder newToKeyName = new StringBuilder(keyName);
+    newToKeyName.append(OZONE_URI_DELIMITER);
+    newToKeyName.append(fileName);
+    return newToKeyName.toString();
+  }
+
+  /**
+   * Returns the number of path components in the given keyName.
+   *
+   * @param keyName key name
+   * @return path components count
+   */
+  public static int getFileCount(String keyName) {
+    java.nio.file.Path keyPath = Paths.get(keyName);
+    return keyPath.getNameCount();
+  }
+
+  /**
+   * Returns true if the bucket is FS optimized, i.e. its metadata layout is
+   * PREFIX and filesystem paths are enabled.
+   * @param bucketMetadata bucket metadata map
+   * @return true if the bucket is FS optimized, false otherwise
+   */
+  public static boolean isFSOptimizedBucket(
+      Map<String, String> bucketMetadata) {
+    // layout 'PREFIX' represents optimized FS path
+    boolean metadataLayoutEnabled =
+        org.apache.commons.lang3.StringUtils.equalsIgnoreCase(
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX,
+            bucketMetadata
+                .get(OMConfigKeys.OZONE_OM_METADATA_LAYOUT));
+
+    boolean fsEnabled =
+        Boolean.parseBoolean(bucketMetadata
+            .get(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS));
+
+    return metadataLayoutEnabled && fsEnabled;
+  }
+
+  public static String removeTrailingSlashIfNeeded(String key) {
+    if (key.endsWith(OZONE_URI_DELIMITER)) {
+      java.nio.file.Path keyPath = Paths.get(key);
+      return keyPath.toString();
+    } else {
+      return key;
+    }
+  }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
new file mode 100644
index 0000000..79a135a
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * Object ID with additional parent ID field.
+ */
+public class WithParentObjectId extends WithObjectID {
+  /**
+   * Object ID with additional parent ID field.
+   *
+   * A pointer to parent directory used for path traversal. ParentID will be
+   * used only when the key is created into a FileSystemOptimized(FSO) bucket.
+   * <p>
+   * For example, if a key "a/b/key1" created into a FSOBucket then each
+   * path component will be assigned an ObjectId and linked to its parent path
+   * component using parent's objectID.
+   * <p>
+   * Say, Bucket's ObjectID = 512, which is the parent for its immediate child
+   * element.
+   * <p>
+   * ------------------------------------------|
+   * PathComponent |   ObjectID   |   ParentID |
+   * ------------------------------------------|
+   *      a        |     1024     |     512    |
+   * ------------------------------------------|
+   *      b        |     1025     |     1024   |
+   * ------------------------------------------|
+   *     key1      |     1026     |     1025   |
+   * ------------------------------------------|
+   */
+  @SuppressWarnings("visibilitymodifier")
+  protected long parentObjectID;
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
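+
+  // Illustration, using the hypothetical IDs from the class comment above:
+  // for key "a/b/key1" in a bucket with objectID=512, the "key1" entry
+  // carries objectID=1026 and parentObjectID=1025, the objectID of its
+  // parent directory "b".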
+
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 94fb3f9..c13ef9f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -752,7 +752,8 @@
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName()).build();
+        .setKeyName(args.getKeyName())
+        .setRecursive(args.isRecursive()).build();
     req.setKeyArgs(keyArgs);
 
     OMRequest omRequest = createOMRequest(Type.DeleteKey)
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
index 4a95e55..1916d25 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
@@ -73,6 +73,8 @@
 
   public abstract String getKeyName();
 
+  public abstract OzonePrefixPath getOzonePrefixPathViewer();
+
   /**
    * Get PrefixName.
    * A prefix name is like a key name under the bucket but
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
index 42ddbb9..76fb76a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
@@ -37,6 +37,8 @@
   private final String bucketName;
   private final String name;
 
+  private OzonePrefixPath ozonePrefixPath;
+
   /**
    *
    * @param resType
@@ -46,11 +48,13 @@
    * @param name - keyName/PrefixName
    */
   private OzoneObjInfo(ResourceType resType, StoreType storeType,
-      String volumeName, String bucketName, String name) {
+      String volumeName, String bucketName, String name,
+      OzonePrefixPath ozonePrefixPath) {
     super(resType, storeType);
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.name = name;
+    this.ozonePrefixPath = ozonePrefixPath;
   }
 
   @Override
@@ -95,6 +99,10 @@
     return name;
   }
 
+  @Override
+  public OzonePrefixPath getOzonePrefixPathViewer() {
+    return ozonePrefixPath;
+  }
 
   public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj
       proto) {
@@ -154,6 +162,7 @@
     private String volumeName;
     private String bucketName;
     private String name;
+    private OzonePrefixPath ozonePrefixPath;
 
     public static Builder newBuilder() {
       return new Builder();
@@ -207,8 +216,15 @@
       return this;
     }
 
+    public Builder setOzonePrefixPath(OzonePrefixPath ozonePrefixPathViewer) {
+      this.ozonePrefixPath = ozonePrefixPathViewer;
+      return this;
+    }
+
     public OzoneObjInfo build() {
-      return new OzoneObjInfo(resType, storeType, volumeName, bucketName, name);
+      return new OzoneObjInfo(resType, storeType, volumeName, bucketName,
+          name, ozonePrefixPath);
     }
   }
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzonePrefixPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzonePrefixPath.java
new file mode 100644
index 0000000..4e91d5a
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzonePrefixPath.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.security.acl;
+
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Interface used to list the immediate children (sub-paths) of a given
+ * keyPrefix.
+ */
+public interface OzonePrefixPath {
+
+  /**
+   * Returns file status for the given key path.
+   *
+   * @return OzoneFileStatus for the given path.
+   */
+  OzoneFileStatus getOzoneFileStatus();
+
+  /**
+   * Lists the immediate children (files or directories) of the given
+   * keyPrefix. It does not traverse recursively; the given keyPrefix must
+   * refer to a directory.
+   *
+   * Assume following is the Ozone FS tree structure.
+   *
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |           |                       |
+   *     b1          b2                      b3
+   *   -----       --------               ----------
+   *   |    |      |    |   |             |    |     |
+   *  c1   c2     d1   d2  d3             e1   e2   e3
+   *                   |                  |
+   *               --------               |
+   *              |        |              |
+   *           d21.txt   d22.txt        e11.txt
+   *
+   * Say, KeyPrefix = "a" will return immediate children [a/b1, a/b2, a/b3].
+   * Say, KeyPrefix = "a/b2" will return children [a/b2/d1, a/b2/d2, a/b2/d3].
+   *
+   * @param keyPrefix  keyPrefix name
+   * @return iterator over the immediate files or directories under the
+   * given keyPrefix.
+   * @throws IOException if the keyPrefix doesn't exist or the listing fails
+   */
+  Iterator<? extends OzoneFileStatus> getChildren(String keyPrefix)
+      throws IOException;
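+
+  // Usage sketch (hypothetical caller code): walk the tree one level at a
+  // time, e.g. while checking ACLs for a recursive delete:
+  //   Iterator<? extends OzoneFileStatus> it = prefixPath.getChildren("a/b2");
+  //   while (it.hasNext()) {
+  //     OzoneFileStatus status = it.next(); // a/b2/d1, a/b2/d2, a/b2/d3
+  //   }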
+}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
index 524d2e5..eb1355d 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml
@@ -25,6 +25,11 @@
   env_file:
     - docker-config
 
+x-layout_version:
+  &metadata_layout
+  OZONE-SITE.XML_ozone.om.metadata.layout: ${OZONE_OM_METADATA_LAYOUT:-SIMPLE}
+  OZONE-SITE.XML_ozone.om.enable.filesystem.paths: ${OZONE_OM_ENABLE_FILESYSTEM_PATHS:-false}
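+# Example (assumed usage): export OZONE_OM_METADATA_LAYOUT=PREFIX and
+# OZONE_OM_ENABLE_FILESYSTEM_PATHS=true before starting the cluster to run
+# with the prefix (FSO) layout; otherwise the defaults keep the SIMPLE layout.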
+
 x-replication:
   &replication
   OZONE-SITE.XML_ozone.replication: ${OZONE_REPLICATION_FACTOR:-1}
@@ -37,6 +42,7 @@
       - 9882
     environment:
       <<: *replication
+      <<: *metadata_layout
       OZONE_OPTS:
     command: ["ozone","datanode"]
   om:
@@ -45,6 +51,7 @@
       ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
       OZONE_OPTS:
       <<: *replication
+      <<: *metadata_layout
     ports:
       - 9874:9874
       - 9862:9862
@@ -59,12 +66,14 @@
       OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1}
       OZONE_OPTS:
       <<: *replication
+      <<: *metadata_layout
     command: ["ozone","scm"]
   s3g:
     <<: *common-config
     environment:
       OZONE_OPTS:
       <<: *replication
+      <<: *metadata_layout
     ports:
       - 9878:9878
     command: ["ozone","s3g"]
@@ -75,4 +84,5 @@
     environment:
       OZONE_OPTS:
       <<: *replication
+      <<: *metadata_layout
     command: ["ozone","recon"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
index b8d86ac..7695acc 100755
--- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh
@@ -35,12 +35,6 @@
 
 execute_robot_test scm gdpr
 
-for scheme in ofs o3fs; do
-  for bucket in link bucket; do
-    execute_robot_test scm -v SCHEME:${scheme} -v BUCKET_TYPE:${bucket} -N ozonefs-${scheme}-${bucket} ozonefs/ozonefs.robot
-  done
-done
-
 execute_robot_test scm security/ozone-secure-token.robot
 
 for bucket in link generated; do
@@ -56,6 +50,28 @@
 execute_robot_test scm cli
 execute_robot_test scm admincli
 
+
+execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:bucket -N ozonefs-simple-ofs-bucket ozonefs/ozonefs.robot
+execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:link -N ozonefs-simple-o3fs-link ozonefs/ozonefs.robot
+
+# running FS tests with different config requires restart of the cluster
+export OZONE_KEEP_RESULTS=true
+stop_docker_env
+
+## Restarting the cluster with prefix-layout enabled (FSO)
+export OZONE_OM_METADATA_LAYOUT=PREFIX
+export OZONE_OM_ENABLE_FILESYSTEM_PATHS=true
+start_docker_env
+
+execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-prefix-ofs-link ozonefs/ozonefs.robot
+execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-prefix-o3fs-bucket ozonefs/ozonefs.robot
+
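+# Note: ${bucket} carries over the last value ("generated") from the
+# "for bucket in link generated" loop above.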
+execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-prefix-layout-objectputget s3/objectputget.robot
+execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-prefix-layout-objectdelete s3/objectdelete.robot
+execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-prefix-layout-objectcopy s3/objectcopy.robot
+execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-prefix-layout-objectmultidelete s3/objectmultidelete.robot
+execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-prefix-layout-MultipartUpload s3/MultipartUpload.robot
+
 stop_docker_env
 
 generate_report
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
new file mode 100644
index 0000000..67b1a14
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.DirectoryDeletingService;
+import org.apache.hadoop.ozone.om.KeyDeletingService;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.LongSupplier;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.fail;
+
+/**
+ * Directory deletion service test cases.
+ */
+public class TestDirectoryDeletingServiceWithFSO {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDirectoryDeletingServiceWithFSO.class);
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = Timeout.seconds(300);
+
+  private static boolean isBucketFSOptimized = true;
+  private static boolean enabledFileSystemPaths = true;
+  private static boolean omRatisEnabled = true;
+
+  private static MiniOzoneCluster cluster;
+  private static FileSystem fs;
+  private static String volumeName;
+  private static String bucketName;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1);
+    conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
+    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
+        TimeUnit.MILLISECONDS);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf,
+          enabledFileSystemPaths, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          enabledFileSystemPaths);
+    }
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+
+    fs = FileSystem.get(conf);
+  }
+
+  @AfterClass
+  public static void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  @After
+  public void cleanup() {
+    try {
+      Path root = new Path("/");
+      FileStatus[] fileStatuses = fs.listStatus(root);
+      for (FileStatus fileStatus : fileStatuses) {
+        fs.delete(fileStatus.getPath(), true);
+      }
+    } catch (IOException ex) {
+      fail("Failed to cleanup files.");
+    }
+  }
+
+  @Test
+  public void testDeleteEmptyDirectory() throws Exception {
+    Path root = new Path("/rootDir");
+    Path appRoot = new Path(root, "appRoot");
+    fs.mkdirs(appRoot);
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(dirTable, 2);
+
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0);
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0);
+
+    // Delete the appRoot, empty dir
+    fs.delete(appRoot, true);
+
+    // After Delete
+    checkPath(appRoot);
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(dirTable, 1);
+
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 1);
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0);
+
+    Assert.assertTrue(dirTable.iterator().hasNext());
+    Assert.assertEquals(root.getName(),
+        dirTable.iterator().next().getValue().getName());
+
+    Assert.assertTrue(dirDeletingService.getRunCount() > 1);
+  }
+
+  /**
+   * Verifies that directories and files are purged in multiple batches.
+   */
+  @Test
+  public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception {
+    Path root = new Path("/rootDir");
+    Path appRoot = new Path(root, "appRoot");
+    // Creates 2 parent dirs from root.
+    fs.mkdirs(appRoot);
+
+    // Create 2 more levels: 3 parentDirs, each with 5 childDirs and 5
+    // childFiles. In total: 3 + (3 * 5) = 18 dirs under appRoot (20 dirs
+    // including root and appRoot) and (3 * 5) = 15 child files.
+    for (int i = 1; i <= 3; i++) {
+      Path childDir = new Path(appRoot, "parentDir" + i);
+      for (int j = 1; j <= 5; j++) {
+        // total 5 sub-dirs + 5 sub-files = 10 items in this level.
+        Path childSubDir = new Path(childDir, "childDir" + j);
+        Path childSubFile = new Path(childDir, "childFile" + j);
+        ContractTestUtils.touch(fs, childSubFile); // create sub file
+        fs.mkdirs(childSubDir); // create sub dir
+      }
+    }
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager().getKeyTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 15);
+    assertTableRowCount(dirTable, 20);
+
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0);
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0);
+
+    // Delete the appRoot
+    fs.delete(appRoot, true);
+
+    // After Delete
+    checkPath(appRoot);
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 0);
+    assertTableRowCount(dirTable, 1);
+
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 15);
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 19);
+
+    Assert.assertTrue(dirDeletingService.getRunCount() > 1);
+  }
+
+  @Test
+  public void testDeleteWithMultiLevels() throws Exception {
+    Path root = new Path("/rootDir");
+    Path appRoot = new Path(root, "appRoot");
+
+    for (int i = 1; i <= 3; i++) {
+      Path parent = new Path(appRoot, "parentDir" + i);
+      Path child = new Path(parent, "childFile");
+      ContractTestUtils.touch(fs, child);
+    }
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager().getKeyTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(dirTable, 5);
+    assertTableRowCount(keyTable, 3);
+
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0);
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0);
+
+    // Delete the rootDir, which should delete all keys.
+    fs.delete(root, true);
+
+    // After Delete
+    checkPath(root);
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 0);
+    assertTableRowCount(dirTable, 0);
+
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 3);
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5);
+
+    Assert.assertTrue(dirDeletingService.getRunCount() > 1);
+  }
+
+  private void assertSubPathsCount(LongSupplier pathCount, long expectedCount)
+      throws TimeoutException, InterruptedException {
+    // Poll the supplier so progress made by the background service is
+    // observed; a long captured by value would never change.
+    GenericTestUtils.waitFor(() -> pathCount.getAsLong() >= expectedCount,
+        1000, 120000);
+  }
+
+  private void assertTableRowCount(Table<String, ?> table, int count)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> assertTableRowCount(count, table), 1000,
+        120000); // 2 minutes
+  }
+
+  private boolean assertTableRowCount(int expectedCount,
+                                      Table<String, ?> table) {
+    long count = 0L;
+    try {
+      count = cluster.getOzoneManager().getMetadataManager()
+          .countRowsInTable(table);
+      LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+          count, expectedCount);
+    } catch (IOException ex) {
+      fail("testDoubleBuffer failed with: " + ex);
+    }
+    return count == expectedCount;
+  }
+
+  private void checkPath(Path path) {
+    try {
+      fs.getFileStatus(path);
+      fail("testRecursiveDelete failed");
+    } catch (IOException ex) {
+      Assert.assertTrue(ex instanceof FileNotFoundException);
+      Assert.assertTrue(ex.getMessage().contains("No such file or directory"));
+    }
+  }
+
+  @Test
+  public void testDeleteFilesAndSubFiles() throws Exception {
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager().getKeyTable();
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+    Table<String, RepeatedOmKeyInfo> deletedKeyTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedTable();
+
+    Path root = new Path("/rootDir2");
+    // Create  parent dir from root.
+    fs.mkdirs(root);
+
+    // Add 5 sub files inside the root dir
+    for (int i = 0; i < 5; i++) {
+      Path path = new Path(root, "testKey" + i);
+      try (FSDataOutputStream stream = fs.create(path)) {
+        stream.write(1);
+      }
+    }
+
+    KeyDeletingService keyDeletingService =
+        (KeyDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDeletingService();
+
+    // Before delete
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 5);
+    assertTableRowCount(dirTable, 1);
+    long prevDeletedKeyCount = keyDeletingService.getDeletedKeyCount().get();
+
+    // Case-1) Delete 3 Files directly.
+    for (int i = 0; i < 3; i++) {
+      Path path = new Path(root, "testKey" + i);
+      fs.delete(path, true);
+    }
+
+    DirectoryDeletingService dirDeletingService =
+        (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager()
+            .getDirDeletingService();
+
+    // After delete, 2 files are left under the root dir
+    assertTableRowCount(keyTable, 2);
+    assertTableRowCount(dirTable, 1);
+
+    // Eventually keys would get cleaned up from deletedTables too
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(deletedKeyTable, 0);
+
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0);
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0);
+    // verify whether KeyDeletingService has purged the keys
+    long currentDeletedKeyCount = keyDeletingService.getDeletedKeyCount().get();
+    Assert.assertEquals(prevDeletedKeyCount + 3, currentDeletedKeyCount);
+
+    // Case-2) Delete dir, this will cleanup sub-files under the deleted dir.
+    fs.delete(root, true);
+
+    // After delete, the 2 remaining sub files are queued for deletion.
+    assertTableRowCount(keyTable, 0);
+    assertTableRowCount(dirTable, 0);
+
+    // Eventually keys would get cleaned up from deletedTables too
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(deletedKeyTable, 0);
+
+    assertSubPathsCount(dirDeletingService::getMovedFilesCount, 2);
+    assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 1);
+    // verify whether KeyDeletingService has purged the keys
+    currentDeletedKeyCount = keyDeletingService.getDeletedKeyCount().get();
+    Assert.assertEquals(prevDeletedKeyCount + 5, currentDeletedKeyCount);
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
index ccb16f4..3d4fe2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
@@ -56,6 +56,8 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
+
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Assert;
 
@@ -119,7 +121,8 @@
 
   private OMMetrics omMetrics;
 
-  private boolean enableFileSystemPaths;
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  protected boolean enableFileSystemPaths;
 
   public TestOzoneFileInterfaces(boolean setDefaultFs,
       boolean useAbsolutePath, boolean enabledFileSystemPaths) {
@@ -134,9 +137,8 @@
     volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
     bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
 
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enableFileSystemPaths);
+    OzoneConfiguration conf = getOzoneConfiguration();
+
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -161,6 +163,14 @@
     omMetrics = cluster.getOzoneManager().getMetrics();
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        enableFileSystemPaths);
+    return conf;
+  }
+
   @After
   public void teardown() throws IOException {
     if (cluster != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesWithFSO.java
new file mode 100644
index 0000000..069154d
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfacesWithFSO.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * Test OzoneFileSystem Interfaces - prefix layout.
+ *
+ * This test will test the various interfaces i.e.
+ * create, read, write, getFileStatus
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileInterfacesWithFSO extends TestOzoneFileInterfaces {
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {{false, true, true}});
+  }
+
+  public TestOzoneFileInterfacesWithFSO(boolean setDefaultFs,
+      boolean useAbsolutePath, boolean enabledFileSystemPaths) {
+    super(setDefaultFs, useAbsolutePath, enabledFileSystemPaths);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf, enableFileSystemPaths,
+        OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    return conf;
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testReplication() throws IOException {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testPathToKey() throws Exception {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testFileSystemInit() throws IOException {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testDirectory() {
+
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testOzFsReadWrite() {
+
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 9f22165..7351602 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -20,10 +20,12 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -35,9 +37,11 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -46,21 +50,27 @@
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
 import org.apache.hadoop.ozone.om.TrashPolicyOzone;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.ozone.test.GenericTestUtils;
 
 import org.apache.commons.io.IOUtils;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
 import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -114,6 +124,7 @@
       }
     }
   }
+
   /**
    * Set a timeout for each test.
    */
@@ -123,6 +134,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestOzoneFileSystem.class);
 
+  private static boolean isBucketFSOptimized = false;
   private static boolean enabledFileSystemPaths;
   private static boolean omRatisEnabled;
 
@@ -135,12 +147,20 @@
 
   private void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(FS_TRASH_INTERVAL_KEY, 2);
-    conf.setInt(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 1);
+    // Trash with 9 second deletes (0.15 min) and 6 second checkpoints (0.1 min)
+    conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, (float) 0.15);
+    conf.setFloat(FS_TRASH_INTERVAL_KEY, (float) 0.15); // 9 seconds
+    conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, (float) 0.1); // 6 seconds
+
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-            enabledFileSystemPaths);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf, enabledFileSystemPaths,
+          OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          enabledFileSystemPaths);
+    }
     cluster = MiniOzoneCluster.newBuilder(conf)
             .setNumDatanodes(3)
             .build();
@@ -185,6 +205,26 @@
     }
   }
 
+  public static MiniOzoneCluster getCluster() {
+    return cluster;
+  }
+
+  public static FileSystem getFs() {
+    return fs;
+  }
+
+  public static void setIsBucketFSOptimized(boolean isBucketFSO) {
+    isBucketFSOptimized = isBucketFSO;
+  }
+
+  public static String getBucketName() {
+    return bucketName;
+  }
+
+  public static String getVolumeName() {
+    return volumeName;
+  }
+
   @Test
   public void testCreateFileShouldCheckExistenceOfDirWithSameName()
       throws Exception {
@@ -231,6 +271,28 @@
     } catch (FileAlreadyExistsException fae) {
       // ignore as its expected
     }
+
+    // Directory
+    FileStatus fileStatus = fs.getFileStatus(parent);
+    assertEquals("FileStatus did not return the directory",
+            "/d1/d2/d3/d4", fileStatus.getPath().toUri().getPath());
+    assertTrue("FileStatus did not return the directory",
+            fileStatus.isDirectory());
+
+    // invalid sub directory
+    try {
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as its expected
+    }
+    // invalid file name
+    try {
+      fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as its expected
+    }
   }
 
   /**
@@ -349,6 +411,24 @@
       ContractTestUtils.touch(fs, child);
     }
 
+    // delete a dir with sub-file
+    try {
+      FileStatus[] parents = fs.listStatus(grandparent);
+      Assert.assertTrue(parents.length > 0);
+      fs.delete(parents[0].getPath(), false);
+      Assert.fail("Must throw exception as dir is not empty!");
+    } catch (PathIsNotEmptyDirectoryException pde) {
+      // expected
+    }
+
+    // delete a non-empty dir (grandparent) without the recursive flag
+    try {
+      fs.delete(grandparent, false);
+      Assert.fail("Must throw exception as dir is not empty!");
+    } catch (PathIsNotEmptyDirectoryException pde) {
+      // expected
+    }
+
     // Delete the grandparent, which should delete all keys.
     fs.delete(grandparent, true);
 
@@ -536,6 +616,7 @@
   @Test
   public void testListStatusOnLargeDirectory() throws Exception {
     Path root = new Path("/");
+    deleteRootDir(); // cleanup
     Set<String> paths = new TreeSet<>();
     int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
     for(int i = 0; i < numDirs; i++) {
@@ -545,6 +626,28 @@
     }
 
     FileStatus[] fileStatuses = o3fs.listStatus(root);
+    // Log extra details on failure to help debug sub-path mismatches.
+    Set<String> actualPaths = new TreeSet<>();
+    ArrayList<String> actualPathList = new ArrayList<>();
+    if (numDirs != fileStatuses.length) {
+      for (int i = 0; i < fileStatuses.length; i++) {
+        boolean duplicate =
+                actualPaths.add(fileStatuses[i].getPath().getName());
+        if (!duplicate) {
+          LOG.info("Duplicate path:{} in FileStatusList",
+                  fileStatuses[i].getPath().getName());
+        }
+        actualPathList.add(fileStatuses[i].getPath().getName());
+      }
+      if (numDirs != actualPathList.size()) {
+        LOG.info("actualPathsSize: {}", actualPaths.size());
+        LOG.info("actualPathListSize: {}", actualPathList.size());
+        actualPaths.removeAll(paths);
+        actualPathList.removeAll(paths);
+        LOG.info("actualPaths: {}", actualPaths);
+        LOG.info("actualPathList: {}", actualPathList);
+      }
+    }
     assertEquals(
         "Total directories listed do not match the existing directories",
         numDirs, fileStatuses.length);
@@ -555,6 +658,29 @@
   }
 
   /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException if cleanup of the root directory fails
+   */
+  protected void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
+  }
+
+  /**
    * Tests listStatus on a path with subdirs.
    */
   @Test
@@ -602,9 +728,51 @@
       stream.seek(fileLength);
       assertEquals(-1, stream.read());
     }
+
+    // non-existent file
+    Path fileNotExists = new Path("/file_notexist");
+    try {
+      fs.open(fileNotExists);
+      Assert.fail("Should throw FILE_NOT_FOUND error as file doesn't exist!");
+    } catch (FileNotFoundException fnfe) {
+      Assert.assertTrue("Expected FILE_NOT_FOUND error",
+              fnfe.getMessage().contains("FILE_NOT_FOUND"));
+    }
   }
 
   @Test
+  public void testAllocateMoreThanOneBlock() throws IOException {
+    Path file = new Path("/file");
+    String str = "TestOzoneFileSystem.testAllocateMoreThanOneBlock";
+    byte[] strBytes = str.getBytes(StandardCharsets.UTF_8);
+    long numBlockAllocationsOrg =
+            cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
+
+    try (FSDataOutputStream out1 = fs.create(file, FsPermission.getDefault(),
+            true, 8, (short) 3, 1, null)) {
+      for (int i = 0; i < 100000; i++) {
+        out1.write(strBytes);
+      }
+    }
+
+    try (FSDataInputStream stream = fs.open(file)) {
+      FileStatus fileStatus = fs.getFileStatus(file);
+      long blkSize = fileStatus.getBlockSize();
+      long fileLength = fileStatus.getLen();
+      Assert.assertTrue("Block allocation should happen",
+              fileLength > blkSize);
+
+      long newNumBlockAllocations =
+              cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
+
+      Assert.assertTrue("Block allocation should happen",
+              (newNumBlockAllocations > numBlockAllocationsOrg));
+
+      stream.seek(fileLength);
+      assertEquals(-1, stream.read());
+    }
+  }
+
+  @Test
   public void testDeleteRoot() throws IOException {
     Path dir = new Path("/dir");
     fs.mkdirs(dir);
@@ -636,6 +804,272 @@
         interimPath.getName(), fileStatus.getPath().getName());
   }
 
+  /**
+   * Case-1) fromKeyName should exist, otherwise rename fails.
+   */
+  @Test
+  public void testRenameWithNonExistentSource() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = root + "/dir2";
+    final Path source = new Path(fs.getUri().toString() + dir1);
+    final Path destin = new Path(fs.getUri().toString() + dir2);
+
+    // creates destin
+    fs.mkdirs(destin);
+    LOG.info("Created destin dir: {}", destin);
+
+    LOG.info("Rename op-> source:{} to destin:{}}", source, destin);
+    assertFalse("Expected to fail rename as src doesn't exist",
+            fs.rename(source, destin));
+  }
+
+  /**
+   * Case-2) Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(fs.getUri().toString() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    fs.mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(fs.getUri().toString() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    try {
+      fs.rename(sourceRoot, subDir1);
+      Assert.fail("Should throw exception : Cannot rename a directory to" +
+              " its own subdirectory");
+    } catch (IllegalArgumentException iae) {
+      // expected
+    }
+  }
+
+  /**
+   * Case-3) If src == destin and both are of the same type, rename succeeds.
+   */
+  @Test
+  public void testRenameSourceAndDestinAreSame() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2Path = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2Path);
+
+    // File rename
+    Path file1 = new Path(fs.getUri().toString() + dir2 + "/file1");
+    ContractTestUtils.touch(fs, file1);
+
+    assertTrue(fs.rename(file1, file1));
+    assertTrue(fs.rename(dir2Path, dir2Path));
+  }
+
+  /**
+   * Case-4) Rename from /a to /b.
+   * <p>
+   * Expected Result: After rename the directory structure will be /b/a.
+   */
+  @Test
+  public void testRenameToExistingDir() throws Exception {
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Add a sub-directory '/a/c' to '/a'. This is to verify that after
+    // rename the sub-directory is also moved.
+    final Path acPath = new Path(fs.getUri().toString() + "/a/c");
+    fs.mkdirs(acPath);
+
+    // Rename from /a to /b.
+    assertTrue("Rename failed", fs.rename(aSourcePath, bDestinPath));
+
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a");
+    final Path bacPath = new Path(fs.getUri().toString() + "/b/a/c");
+    assertTrue("Rename failed", fs.exists(baPath));
+    assertTrue("Rename failed", fs.exists(bacPath));
+  }
+
+  /**
+   * Case-5) If the new destin '/dst/source' exists then rename fails.
+   * If destination is a directory then rename source as sub-path of it.
+   * <p>
+   * For example: rename /a to /b will lead to /b/a. This new path should
+   * not exist.
+   */
+  @Test
+  public void testRenameToNewSubDirShouldNotExist() throws Exception {
+    // Case-5.a) Rename directory from /a to /b.
+    // created /a
+    final Path aSourcePath = new Path(fs.getUri().toString() + "/a");
+    fs.mkdirs(aSourcePath);
+
+    // created /b
+    final Path bDestinPath = new Path(fs.getUri().toString() + "/b");
+    fs.mkdirs(bDestinPath);
+
+    // Create '/b/a/c', which implicitly creates '/b/a'. This is to verify
+    // that rename fails since the new destin /b/a already exists.
+    final Path baPath = new Path(fs.getUri().toString() + "/b/a/c");
+    fs.mkdirs(baPath);
+
+    Assert.assertFalse("New destin sub-path /b/a already exists",
+            fs.rename(aSourcePath, bDestinPath));
+
+    // Case-5.b) Rename file from /a/b/c/file1 to /a.
+    // Should fail since /a/file1 exists.
+    final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcPath);
+    Path abcFile1 = new Path(abcPath, "/file1");
+    ContractTestUtils.touch(fs, abcFile1);
+
+    final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1");
+    ContractTestUtils.touch(fs, aFile1);
+
+    final Path aDestinPath = new Path(fs.getUri().toString() + "/a");
+
+    Assert.assertFalse("New destin sub-path /b/a already exists",
+            fs.rename(abcFile1, aDestinPath));
+  }
+
+  /**
+   * Case-6) Rename directory to an existed file, should be failed.
+   */
+  @Test
+  public void testRenameDirToFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    Assert.assertFalse("key already exists /root_dir/file1",
+            fs.rename(abcRootPath, file1Destin));
+  }
+
+  /**
+   * Rename file to a non-existent destin file.
+   */
+  @Test
+  public void testRenameFile() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Source = new Path(fs.getUri().toString() + root
+            + "/file1_Copy");
+    ContractTestUtils.touch(fs, file1Source);
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    assertTrue("Renamed failed", fs.rename(file1Source, file1Destin));
+    assertTrue("Renamed failed: /root/file1", fs.exists(file1Destin));
+
+    // Read back several times to verify that the cached OmKeyInfo#keyName
+    // entry is not modified. While reading back, OmKeyInfo#keyName is
+    // prepared and assigned the full keyPath name.
+    for (int i = 0; i < 10; i++) {
+      FileStatus[] fStatus = fs.listStatus(rootPath);
+      assertEquals("Renamed failed", 1, fStatus.length);
+      assertEquals("Wrong path name!", file1Destin, fStatus[0].getPath());
+    }
+  }
+
+  /**
+   * Rename file to an existing directory.
+   */
+  @Test
+  public void testRenameFileToDir() throws Exception {
+    final String root = "/root";
+    Path rootPath = new Path(fs.getUri().toString() + root);
+    fs.mkdirs(rootPath);
+
+    Path file1Destin = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, file1Destin);
+    Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c");
+    fs.mkdirs(abcRootPath);
+    assertTrue("Renamed failed", fs.rename(file1Destin, abcRootPath));
+    assertTrue("Renamed filed: /a/b/c/file1", fs.exists(new Path(abcRootPath,
+            "file1")));
+  }
+
+
+  /**
+   * Fails if the (a) parent of dst does not exist or (b) parent is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(fs.getUri().toString() + root + "/b/c");
+    try {
+      fs.rename(dir2SourcePath, destinPath);
+      Assert.fail("Should fail as parent of dst does not exist!");
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(fs.getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(fs, filePath);
+
+    Path newDestinPath = new Path(filePath, "c");
+    try {
+      fs.rename(dir2SourcePath, newDestinPath);
+      Assert.fail("Should fail as parent of dst is a file!");
+    } catch (IOException ioe) {
+      // expected
+    }
+  }
+
+  /**
+   * Renaming to the source's parent directory succeeds.
+   * 1. Rename from /root_dir/dir1/dir2 to /root_dir.
+   * Expected result : /root_dir/dir2
+   * <p>
+   * 2. Rename from /root_dir/dir1/file1 to /root_dir.
+   * Expected result : /root_dir/file1.
+   */
+  @Test
+  public void testRenameToParentDir() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(fs.getUri().toString() + dir2);
+    fs.mkdirs(dir2SourcePath);
+    final Path destRootPath = new Path(fs.getUri().toString() + root);
+
+    Path file1Source = new Path(fs.getUri().toString() + dir1 + "/file2");
+    ContractTestUtils.touch(fs, file1Source);
+
+    // rename source directory to its parent directory(destination).
+    assertTrue("Rename failed", fs.rename(dir2SourcePath, destRootPath));
+    final Path expectedPathAfterRename =
+            new Path(fs.getUri().toString() + root + "/dir2");
+    assertTrue("Rename failed",
+            fs.exists(expectedPathAfterRename));
+
+    // rename source file to its parent directory(destination).
+    assertTrue("Rename failed", fs.rename(file1Source, destRootPath));
+    final Path expectedFilePathAfterRename =
+            new Path(fs.getUri().toString() + root + "/file2");
+    assertTrue("Rename failed",
+            fs.exists(expectedFilePathAfterRename));
+  }
+
   @Test
   public void testRenameDir() throws Exception {
     final String dir = "/root_dir/dir1";
@@ -657,6 +1091,7 @@
     LambdaTestUtils.intercept(IllegalArgumentException.class, "Wrong FS",
         () -> fs.rename(new Path(fs.getUri().toString() + "fake" + dir), dest));
   }
+
   private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory)
       throws IOException {
     String key = o3fs.pathToKey(keyPath);
@@ -805,8 +1240,8 @@
     Assert.assertTrue(trash.getConf().getClass(
         "fs.trash.classname", TrashPolicy.class).
         isAssignableFrom(TrashPolicyOzone.class));
-    Assert.assertEquals(1, trash.getConf().
-        getInt(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 0));
+    Assert.assertEquals((float) 0.15, trash.getConf().
+        getFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, 0), 0);
     // Call moveToTrash. We can't call protected fs.rename() directly
     trash.moveToTrash(path);
 
@@ -842,4 +1277,61 @@
       }
     }, 1000, 120000);
   }
+
+  @Test
+  public void testListStatusOnLargeDirectoryForACLCheck() throws Exception {
+    String keyName = "dir1/dir2/testListStatusOnLargeDirectoryForACLCheck";
+    Path root = new Path(OZONE_URI_DELIMITER, keyName);
+    Set<String> paths = new TreeSet<>();
+    int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2;
+    for (int i = 0; i < numDirs; i++) {
+      Path p = new Path(root, String.valueOf(i));
+      getFs().mkdirs(p);
+      paths.add(keyName + OM_KEY_PREFIX + p.getName());
+    }
+
+    // unknown keyname
+    try {
+      new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey",
+          cluster.getOzoneManager().getKeyManager());
+      Assert.fail("Non-existent key name!");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.KEY_NOT_FOUND,
+          ome.getResult());
+    }
+
+    OzonePrefixPathImpl ozonePrefixPath =
+        new OzonePrefixPathImpl(getVolumeName(), getBucketName(), keyName,
+            cluster.getOzoneManager().getKeyManager());
+
+    OzoneFileStatus status = ozonePrefixPath.getOzoneFileStatus();
+    Assert.assertNotNull(status);
+    Assert.assertEquals(keyName, status.getTrimmedName());
+    Assert.assertTrue(status.isDirectory());
+
+    Iterator<? extends OzoneFileStatus> pathItr =
+        ozonePrefixPath.getChildren(keyName);
+    Assert.assertTrue("Failed to list keyPath:" + keyName, pathItr.hasNext());
+
+    Set<String> actualPaths = new TreeSet<>();
+    while (pathItr.hasNext()) {
+      String pathname = pathItr.next().getTrimmedName();
+      actualPaths.add(pathname);
+
+      // no subpaths, expected an empty list
+      Iterator<? extends OzoneFileStatus> subPathItr =
+          ozonePrefixPath.getChildren(pathname);
+      Assert.assertNotNull(subPathItr);
+      Assert.assertFalse("Failed to list keyPath: " + pathname,
+          subPathItr.hasNext());
+    }
+
+    Assert.assertEquals("ListStatus failed", paths.size(),
+        actualPaths.size());
+
+    for (String pathname : actualPaths) {
+      paths.remove(pathname);
+    }
+    Assert.assertTrue("ListStatus failed:" + paths, paths.isEmpty());
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
new file mode 100644
index 0000000..fef2472
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.debug.PrefixParser;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Test Ozone Prefix Parser.
+ */
+public class TestOzoneFileSystemPrefixParser {
+
+  private static MiniOzoneCluster cluster = null;
+
+  private static FileSystem fs;
+
+  private static String volumeName;
+
+  private static String bucketName;
+
+  private static OzoneConfiguration configuration;
+
+  private static Path dir;
+  private static Path file;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
+
+    configuration = new OzoneConfiguration();
+
+    TestOMRequestUtils.configureFSOptimizedPaths(configuration,
+        true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+
+    cluster = MiniOzoneCluster.newBuilder(configuration)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
+
+    String rootPath = String
+        .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName,
+            volumeName);
+    fs = FileSystem.get(new URI(rootPath + "/test.txt"), configuration);
+
+    dir = new Path("/a/b/c/d/e");
+    fs.mkdirs(dir);
+    file = new Path("/a/b/c/file1");
+    FSDataOutputStream os = fs.create(file);
+    os.close();
+  }
+
+  @AfterClass
+  public static void teardown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  @Test(timeout = 120000)
+  public void testPrefixParsePath() throws Exception {
+
+    cluster.stop();
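+    // PrefixParser is an offline debug tool that reads the OM DB files
+    // directly, so the cluster is stopped before parsing.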
+
+    // Directory Path
+    verifyPrefixParsePath(dir.getParent(), 4, 0, 0, 1);
+
+    // File Path
+    verifyPrefixParsePath(file, 3, 1, 1, 1);
+
+    // Verify invalid path
+    testPrefixParseWithInvalidPaths();
+  }
+
+  private void assertPrefixStats(PrefixParser parser, int volumeCount,
+      int bucketCount, int intermediateDirCount, int nonExistentDirCount,
+      int fileCount, int dirCount) {
+    Assert.assertEquals(volumeCount,
+        parser.getParserStats(PrefixParser.Types.VOLUME));
+    Assert.assertEquals(bucketCount,
+        parser.getParserStats(PrefixParser.Types.BUCKET));
+    Assert.assertEquals(intermediateDirCount,
+        parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY));
+    Assert.assertEquals(nonExistentDirCount,
+        parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY));
+    Assert.assertEquals(fileCount,
+        parser.getParserStats(PrefixParser.Types.FILE));
+    Assert.assertEquals(dirCount,
+        parser.getParserStats(PrefixParser.Types.DIRECTORY));
+  }
+
+  private void testPrefixParseWithInvalidPaths() throws Exception {
+    PrefixParser invalidVolumeParser = new PrefixParser();
+    String invalidVolumeName =
+        RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    invalidVolumeParser.parse(invalidVolumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+    assertPrefixStats(invalidVolumeParser, 0, 0, 0, 0, 0, 0);
+
+    PrefixParser invalidBucketParser = new PrefixParser();
+    String invalidBucketName =
+        RandomStringUtils.randomAlphabetic(10).toLowerCase();
+    invalidBucketParser.parse(volumeName, invalidBucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        file.toString());
+    assertPrefixStats(invalidBucketParser, 1, 0, 0, 0, 0, 0);
+
+
+    Path invalidIntermediateDir = new Path(file.getParent(), "xyz");
+    PrefixParser invalidIntermediateDirParser = new PrefixParser();
+    invalidIntermediateDirParser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(),
+        invalidIntermediateDir.toString());
+
+    assertPrefixStats(invalidIntermediateDirParser, 1, 1, 3, 1, 1, 1);
+
+  }
+
+  private void verifyPrefixParsePath(Path parent, int intermediateDirCount,
+      int nonExistentDirCount, int fileCount, int dirCount) throws Exception {
+    PrefixParser parser = new PrefixParser();
+
+    parser.parse(volumeName, bucketName,
+        OMStorage.getOmDbDir(configuration).getPath(), parent.toString());
+
+    assertPrefixStats(parser, 1, 1, intermediateDirCount, nonExistentDirCount,
+        fileCount, dirCount);
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithFSO.java
new file mode 100644
index 0000000..270a46a
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithFSO.java
@@ -0,0 +1,479 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+/**
+ * Ozone file system tests for the prefix (FSO) layout that are not covered
+ * by contract tests.
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileSystemWithFSO extends TestOzoneFileSystem {
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
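+    // {setDefaultFs, enableOMRatis}: exercise the FSO layout both with and
+    // without OM Ratis enabled.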
+    return Arrays.asList(
+            new Object[]{true, true},
+            new Object[]{true, false});
+  }
+
+  @BeforeClass
+  public static void init() {
+    setIsBucketFSOptimized(true);
+  }
+
+  public TestOzoneFileSystemWithFSO(boolean setDefaultFs,
+      boolean enableOMRatis) {
+    super(setDefaultFs, enableOMRatis);
+  }
+
+  @After
+  @Override
+  public void cleanup() {
+    super.cleanup();
+    try {
+      deleteRootDir();
+    } catch (IOException e) {
+      LOG.info("Failed to cleanup DB tables.", e);
+      fail("Failed to cleanup DB tables." + e.getMessage());
+    }
+  }
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneFileSystemWithFSO.class);
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testGetDirectoryModificationTime() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testOzoneFsServiceLoader() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testCreateWithInvalidPaths() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Test
+  public void testListStatusWithoutRecursiveSearch() throws Exception {
+    /*
+     * Op 1. create file -> /key1
+     * Op 2. create file -> /d1/key1
+     * Op 3. create file -> /d2/key1
+     * Op 4. create file -> /d1/d2/key1
+     * Op 5. create file -> /d1/key2
+     * Op 6. create dir -> /d1/d3
+     * Op 7. create dir -> /d1/d4
+     */
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = getFs().create(key1,
+            false)) {
+      assertNotNull("Should be able to create file: key1",
+              outputStream);
+    }
+    Path d1 = new Path("/d1");
+    Path dir1Key1 = new Path(d1, "key1");
+    try (FSDataOutputStream outputStream = getFs().create(dir1Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir1Key1,
+              outputStream);
+    }
+    Path d2 = new Path("/d2");
+    Path dir2Key1 = new Path(d2, "key1");
+    try (FSDataOutputStream outputStream = getFs().create(dir2Key1, false)) {
+      assertNotNull("Should be able to create file: " + dir2Key1,
+              outputStream);
+    }
+    Path dir1Dir2 = new Path("/d1/d2/");
+    Path dir1Dir2Key1 = new Path(dir1Dir2, "key1");
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir2Key1,
+              outputStream);
+    }
+    Path d1Key2 = new Path(d1, "key2");
+    try (FSDataOutputStream outputStream = getFs().create(d1Key2, false)) {
+      assertNotNull("Should be able to create file: " + d1Key2,
+              outputStream);
+    }
+
+    Path dir1Dir3 = new Path("/d1/d3/");
+    Path dir1Dir4 = new Path("/d1/d4/");
+
+    getFs().mkdirs(dir1Dir3);
+    getFs().mkdirs(dir1Dir4);
+
+    String bucketName = getBucketName();
+    String volumeName = getVolumeName();
+
+    // Root Directory
+    FileStatus[] fileStatusList = getFs().listStatus(new Path("/"));
+    assertEquals("FileStatus should return files and directories",
+            3, fileStatusList.length);
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-1 sub-dirs
+    fileStatusList = getFs().listStatus(new Path("/d1"));
+    assertEquals("FileStatus should return files and directories",
+            5, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d3");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d4");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key1");
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/key2");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // level-2 sub-dirs
+    fileStatusList = getFs().listStatus(new Path("/d1/d2"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // listStatus on a level-2 file returns just that file
+    fileStatusList = getFs().listStatus(new Path("/d1/d2/key1"));
+    assertEquals("FileStatus should return files and directories",
+            1, fileStatusList.length);
+    expectedPaths = new ArrayList<>();
+    expectedPaths.add("o3fs://" + bucketName + "." + volumeName + "/d1/d2/" +
+            "key1");
+    for (FileStatus fileStatus : fileStatusList) {
+      expectedPaths.remove(fileStatus.getPath().toString());
+    }
+    assertEquals("Failed to return the filestatus[]" + expectedPaths,
+            0, expectedPaths.size());
+
+    // invalid root key
+    try {
+      fileStatusList = getFs().listStatus(new Path("/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+    try {
+      fileStatusList = getFs().listStatus(new Path("/d1/d2/key2"));
+      fail("Should throw FileNotFoundException");
+    } catch (FileNotFoundException fnfe) {
+      // ignore as it's expected
+    }
+  }
+
+  @Test
+  public void testListFilesRecursive() throws Exception {
+    /*
+     * Op 1. create file -> /d1/d1/d2/key1
+     * Op 2. create file -> /key1
+     * Op 3. create file -> /key2
+     * Op 4. create file -> /d1/d2/d1/d2/key1
+     */
+    Path dir1Dir1Dir2Key1 = new Path("/d1/d1/d2/key1");
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: " + dir1Dir1Dir2Key1,
+              outputStream);
+    }
+    Path key1 = new Path("/key1");
+    try (FSDataOutputStream outputStream = getFs().create(key1, false)) {
+      assertNotNull("Should be able to create file: " + key1,
+              outputStream);
+    }
+    Path key2 = new Path("/key2");
+    try (FSDataOutputStream outputStream = getFs().create(key2, false)) {
+      assertNotNull("Should be able to create file: key2",
+              outputStream);
+    }
+    Path dir1Dir2Dir1Dir2Key1 = new Path("/d1/d2/d1/d2/key1");
+    try (FSDataOutputStream outputStream = getFs().create(dir1Dir2Dir1Dir2Key1,
+            false)) {
+      assertNotNull("Should be able to create file: "
+              + dir1Dir2Dir1Dir2Key1, outputStream);
+    }
+    RemoteIterator<LocatedFileStatus> fileStatusItr = getFs().listFiles(
+            new Path("/"), true);
+    String uriPrefix = "o3fs://" + getBucketName() + "." + getVolumeName();
+    ArrayList<String> expectedPaths = new ArrayList<>();
+    expectedPaths.add(uriPrefix + dir1Dir1Dir2Key1.toString());
+    expectedPaths.add(uriPrefix + key1.toString());
+    expectedPaths.add(uriPrefix + key2.toString());
+    expectedPaths.add(uriPrefix + dir1Dir2Dir1Dir2Key1.toString());
+    int expectedFilesCount = expectedPaths.size();
+    int actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+
+    // Recursive=false
+    fileStatusItr = getFs().listFiles(new Path("/"), false);
+    expectedPaths.clear();
+    expectedPaths.add(uriPrefix + "/key1");
+    expectedPaths.add(uriPrefix + "/key2");
+    expectedFilesCount = expectedPaths.size();
+    actualCount = 0;
+    while (fileStatusItr.hasNext()) {
+      LocatedFileStatus status = fileStatusItr.next();
+      expectedPaths.remove(status.getPath().toString());
+      actualCount++;
+    }
+    assertEquals("Failed to get all the files: " + expectedPaths, 0,
+            expectedPaths.size());
+    assertEquals("Failed to get all the files: " + expectedPaths,
+            expectedFilesCount, actualCount);
+  }
+
+  /**
+   * Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(getFs().getUri().toString() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    getFs().mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(getFs().getUri().toString() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    //  rename should fail and return false
+    Assert.assertFalse(getFs().rename(sourceRoot, subDir1));
+  }
+
+  /**
+   * Rename fails if (a) the parent of dst does not exist or (b) the parent
+   * is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getFs().getUri().toString() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(getFs().getUri().toString()
+            + root + "/b/c");
+
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, destinPath));
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(getFs().getUri().toString() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
+    Path newDestinPath = new Path(filePath, "c");
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, newDestinPath));
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-2939")
+  public void testListStatusWithIntermediateDir() throws Exception {
+  }
+
+  @Override
+  @Test
+  @Ignore("TODO:HDDS-5012")
+  public void testListStatusOnLargeDirectory() throws Exception {
+  }
+
+  @Test
+  public void testMultiLevelDirs() throws Exception {
+    // reset metrics
+    long numKeys = getCluster().getOzoneManager().getMetrics().getNumKeys();
+    getCluster().getOzoneManager().getMetrics().decNumKeys(numKeys);
+    Assert.assertEquals(0,
+        getCluster().getOzoneManager().getMetrics().getNumKeys());
+
+    // Op 1. create dir -> /d1/d2/d3/d4/
+    // Op 2. create dir -> /d1/d2/d3/d4/d5
+    // Op 3. create dir -> /d1/d2/d3/d4/d6
+    Path parent = new Path("/d1/d2/d3/d4/");
+    getFs().mkdirs(parent);
+
+    OMMetadataManager omMgr =
+        getCluster().getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable()
+        .get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID =
+        verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
+    long d3ObjectID =
+        verifyDirKey(d2ObjectID, "d3", "/d1/d2/d3", dirKeys, omMgr);
+    long d4ObjectID =
+        verifyDirKey(d3ObjectID, "d4", "/d1/d2/d3/d4", dirKeys, omMgr);
+
+    Assert.assertEquals("Wrong OM numKeys metrics", 4,
+        getCluster().getOzoneManager().getMetrics().getNumKeys());
+
+    // create sub-dirs under same parent
+    Path subDir5 = new Path("/d1/d2/d3/d4/d5");
+    getFs().mkdirs(subDir5);
+    Path subDir6 = new Path("/d1/d2/d3/d4/d6");
+    getFs().mkdirs(subDir6);
+    long d5ObjectID =
+        verifyDirKey(d4ObjectID, "d5", "/d1/d2/d3/d4/d5", dirKeys, omMgr);
+    long d6ObjectID =
+        verifyDirKey(d4ObjectID, "d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr);
+    Assert.assertTrue(
+        "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID
+            + "/d6] of same parent!", d5ObjectID != d6ObjectID);
+
+    Assert.assertEquals("Wrong OM numKeys metrics", 6,
+        getCluster().getOzoneManager().getMetrics().getNumKeys());
+  }
+
+  @Test
+  public void testCreateFile() throws Exception {
+    // Op 1. create file -> /d1/d2/file1
+    Path parent = new Path("/d1/d2/");
+    Path file = new Path(parent, "file1");
+    FSDataOutputStream outputStream = getFs().create(file);
+    String openFileKey = "";
+
+    OMMetadataManager omMgr =
+        getCluster().getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable()
+        .get(omMgr.getBucketKey(getVolumeName(), getBucketName()));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID =
+        verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1", dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys, omMgr);
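+    // In the PREFIX layout a file is addressed by its parent's objectID plus
+    // the file name, not by the full path.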
+    openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX + file.getName();
+
+    // trigger CommitKeyRequest
+    outputStream.close();
+
+    OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
+    Assert.assertNotNull("Invalid Key!", omKeyInfo);
+    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+
+    // wait for DB updates
+    GenericTestUtils.waitFor(() -> {
+      try {
+        return omMgr.getOpenKeyTable().isEmpty();
+      } catch (IOException e) {
+        LOG.error("DB failure!", e);
+        Assert.fail("DB failure!");
+        return false;
+      }
+    }, 1000, 120000);
+  }
+
+  private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
+      long parentID) {
+    Assert.assertEquals("Wrong keyName", fileName, omKeyInfo.getKeyName());
+    Assert.assertEquals("Wrong parentID", parentID,
+        omKeyInfo.getParentObjectID());
+    String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName;
+    Assert.assertEquals("Wrong path format", dbKey, omKeyInfo.getPath());
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+      ArrayList<String> dirKeys, OMMetadataManager omMgr)
+      throws Exception {
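+    // directoryTable entries are keyed as <parentObjectID>/<dirName>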
+    String dbKey = parentId + "/" + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+        " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+        dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+        dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+        dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+        dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    Assert.assertEquals("Wrong representation!",
+        dbKey + ":" + dirInfo.getObjectID(), dirInfo.toString());
+    return dirInfo.getObjectID();
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 71c4fd3..780b64a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.TrashPolicyOzone;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@@ -61,7 +62,10 @@
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -71,12 +75,13 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Optional;
+import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.apache.hadoop.fs.FileSystem.LOG;
 import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
 import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
@@ -85,8 +90,10 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Ozone file system tests that are not covered by contract tests.
@@ -95,6 +102,9 @@
 @RunWith(Parameterized.class)
 public class TestRootedOzoneFileSystem {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRootedOzoneFileSystem.class);
+
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
     return Arrays.asList(
@@ -110,11 +120,20 @@
     omRatisEnabled = enableOMRatis;
   }
 
+  public static FileSystem getFs() {
+    return fs;
+  }
+
+  public static Path getBucketPath() {
+    return bucketPath;
+  }
+
   @Rule
   public Timeout globalTimeout = Timeout.seconds(300);
 
   private static boolean enabledFileSystemPaths;
   private static boolean omRatisEnabled;
+  private static boolean isBucketFSOptimized = false;
 
   private static OzoneConfiguration conf;
   private static MiniOzoneCluster cluster = null;
@@ -134,10 +153,18 @@
   @BeforeClass
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, (float) 0.15);
+    // Trash with 9 second deletes and 6 seconds checkpoints
+    conf.setFloat(FS_TRASH_INTERVAL_KEY, (float) 0.15); // 9 seconds
+    conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, (float) 0.1); // 6 seconds
     conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
-        enabledFileSystemPaths);
+    if (isBucketFSOptimized) {
+      TestOMRequestUtils.configureFSOptimizedPaths(conf,
+          true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    } else {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          enabledFileSystemPaths);
+    }
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .build();
@@ -179,6 +206,10 @@
     return cluster.getOzoneManager().getMetrics();
   }
 
+  protected static void setIsBucketFSOptimized(boolean isBucketFSO) {
+    isBucketFSOptimized = isBucketFSO;
+  }
+
   @Test
   public void testOzoneFsServiceLoader() throws IOException {
     OzoneConfiguration confTestLoader = new OzoneConfiguration();
@@ -1226,6 +1257,11 @@
     long prevNumTrashRenames = getOMMetrics().getNumTrashRenames();
     long prevNumTrashFileRenames = getOMMetrics().getNumTrashFilesRenames();
 
+    long prevNumTrashAtomicDirDeletes = getOMMetrics()
+        .getNumTrashAtomicDirDeletes();
+    long prevNumTrashAtomicDirRenames = getOMMetrics()
+        .getNumTrashAtomicDirRenames();
+
     // Call moveToTrash. We can't call protected fs.rename() directly
     trash.moveToTrash(keyPath1);
     // for key in second bucket
@@ -1254,11 +1290,17 @@
       }
     }, 1000, 180000);
 
-    // This condition should pass after the checkpoint
-    Assert.assertTrue(getOMMetrics()
-        .getNumTrashRenames() > prevNumTrashRenames);
-    Assert.assertTrue(getOMMetrics()
-        .getNumTrashFilesRenames() > prevNumTrashFileRenames);
+    if (isBucketFSOptimized) {
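+      // FSO moves the whole trash directory with one atomic rename, so only
+      // the dir-level metric is expected to increase.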
+      Assert.assertTrue(getOMMetrics()
+          .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames);
+    } else {
+      // This condition should pass after the checkpoint
+      Assert.assertTrue(getOMMetrics()
+          .getNumTrashRenames() > prevNumTrashRenames);
+      // With new layout version, file renames wouldn't be counted
+      Assert.assertTrue(getOMMetrics()
+          .getNumTrashFilesRenames() > prevNumTrashFileRenames);
+    }
 
     // wait for deletion of checkpoint dir
     GenericTestUtils.waitFor(()-> {
@@ -1273,10 +1315,16 @@
     }, 1000, 120000);
 
     // This condition should succeed once the checkpoint directory is deleted
-    GenericTestUtils.waitFor(
-        () -> getOMMetrics().getNumTrashDeletes() > prevNumTrashDeletes
-            && getOMMetrics().getNumTrashFilesDeletes()
-            > prevNumTrashFileDeletes, 100, 180000);
+    if (isBucketFSOptimized) {
+      GenericTestUtils.waitFor(
+          () -> getOMMetrics().getNumTrashAtomicDirDeletes() >
+              prevNumTrashAtomicDirDeletes, 100, 180000);
+    } else {
+      GenericTestUtils.waitFor(
+          () -> getOMMetrics().getNumTrashDeletes() > prevNumTrashDeletes
+              && getOMMetrics().getNumTrashFilesDeletes()
+              >= prevNumTrashFileDeletes, 100, 180000);
+    }
     // Cleanup
     ofs.delete(trashRoot, true);
     ofs.delete(trashRoot2, true);
@@ -1309,4 +1357,160 @@
     LambdaTestUtils.intercept(InvalidPathException.class, "Invalid path Name",
         () -> fs.create(path, false));
   }
+
+  @Test
+  public void testRenameDir() throws Exception {
+    final String dir = "dir1";
+    final Path source = new Path(getBucketPath(), dir);
+    final Path dest = new Path(source.toString() + ".renamed");
+    // Add a sub-dir to the directory to be moved.
+    final Path subdir = new Path(source, "sub_dir1");
+    getFs().mkdirs(subdir);
+    LOG.info("Created dir {}", subdir);
+    LOG.info("Will move {} to {}", source, dest);
+    getFs().rename(source, dest);
+    assertTrue("Directory rename failed", getFs().exists(dest));
+    // Verify that the subdir is also renamed i.e. keys corresponding to the
+    // sub-directories of the renamed directory have also been renamed.
+    assertTrue("Keys under the renamed directory not renamed",
+        getFs().exists(new Path(dest, "sub_dir1")));
+    // cleanup
+    getFs().delete(dest, true);
+  }
+
+  @Test
+  public void testRenameFile() throws Exception {
+    final String dir = "/dir" + new Random().nextInt(1000);
+    Path dirPath = new Path(getBucketPath() + dir);
+    getFs().mkdirs(dirPath);
+
+    Path file1Source = new Path(getBucketPath() + dir
+        + "/file1_Copy");
+    ContractTestUtils.touch(getFs(), file1Source);
+    Path file1Destin = new Path(getBucketPath() + dir + "/file1");
+    assertTrue("Renamed failed", getFs().rename(file1Source, file1Destin));
+    assertTrue("Renamed failed: /dir/file1", getFs().exists(file1Destin));
+    FileStatus[] fStatus = getFs().listStatus(dirPath);
+    assertEquals("Renamed failed", 1, fStatus.length);
+    getFs().delete(getBucketPath(), true);
+  }
+
+  /**
+   * Rename a file to an existing directory.
+   */
+  @Test
+  public void testRenameFileToDir() throws Exception {
+    final String dir = "/dir" + new Random().nextInt(1000);
+    Path dirPath = new Path(getBucketPath() + dir);
+    getFs().mkdirs(dirPath);
+
+    Path file1Destin = new Path(getBucketPath() + dir + "/file1");
+    ContractTestUtils.touch(getFs(), file1Destin);
+    Path abcRootPath = new Path(getBucketPath() + "/a/b/c");
+    getFs().mkdirs(abcRootPath);
+    assertTrue("Renamed failed", getFs().rename(file1Destin, abcRootPath));
+    assertTrue("Renamed filed: /a/b/c/file1", getFs().exists(new Path(
+        abcRootPath, "file1")));
+    getFs().delete(getBucketPath(), true);
+  }
+
+  /**
+   * Rename to the source's parent directory, it will succeed.
+   * 1. Rename from /root_dir/dir1/dir2 to /root_dir.
+   * Expected result : /root_dir/dir2
+   * <p>
+   * 2. Rename from /root_dir/dir1/file1 to /root_dir.
+   * Expected result : /root_dir/file1.
+   */
+  @Test
+  public void testRenameToParentDir() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getBucketPath() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    final Path destRootPath = new Path(getBucketPath() + root);
+
+    Path file1Source = new Path(getBucketPath() + dir1 + "/file2");
+    ContractTestUtils.touch(getFs(), file1Source);
+
+    // rename source directory to its parent directory (destination).
+    assertTrue("Rename failed", getFs().rename(dir2SourcePath, destRootPath));
+    final Path expectedPathAfterRename =
+        new Path(getBucketPath() + root + "/dir2");
+    assertTrue("Rename failed",
+        getFs().exists(expectedPathAfterRename));
+
+    // rename source file to its parent directory (destination).
+    assertTrue("Rename failed", getFs().rename(file1Source, destRootPath));
+    final Path expectedFilePathAfterRename =
+        new Path(getBucketPath() + root + "/file2");
+    assertTrue("Rename failed",
+        getFs().exists(expectedFilePathAfterRename));
+    getFs().delete(getBucketPath(), true);
+  }
+
+  /**
+   * Cannot rename a directory to its own subdirectory.
+   */
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(getBucketPath() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    getFs().mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(getBucketPath() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    // rename should fail with IllegalArgumentException
+    try {
+      getFs().rename(sourceRoot, subDir1);
+      fail("Should throw exception: cannot rename a directory to" +
+          " its own subdirectory");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+  }
+
+  /**
+   * Rename fails if (a) the parent of dst does not exist or (b) the parent
+   * is a file.
+   */
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getBucketPath() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(getBucketPath()
+        + root + "/b/c");
+
+    // rename should throw exception
+    try {
+      getFs().rename(dir2SourcePath, destinPath);
+      fail("Should fail as parent of dst does not exist!");
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(getBucketPath() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
+    Path newDestinPath = new Path(filePath, "c");
+    // rename should throw exception
+    try {
+      getFs().rename(dir2SourcePath, newDestinPath);
+      fail("Should fail as parent of dst is a file!");
+    } catch (IOException e) {
+      // expected
+    }
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java
new file mode 100644
index 0000000..6863393
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * Tests to verify ofs with prefix enabled cases.
+ */
+@RunWith(Parameterized.class)
+public class TestRootedOzoneFileSystemWithFSO
+    extends TestRootedOzoneFileSystem {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRootedOzoneFileSystemWithFSO.class);
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+        new Object[]{true, true},
+        new Object[]{true, false});
+  }
+
+  public TestRootedOzoneFileSystemWithFSO(boolean setDefaultFs,
+      boolean enableOMRatis) throws Exception {
+    super(setDefaultFs, enableOMRatis);
+  }
+
+  @BeforeClass
+  public static void init() throws Exception {
+    setIsBucketFSOptimized(true);
+    TestRootedOzoneFileSystem.init();
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testTempMount() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testOzoneFsServiceLoader() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testCreateWithInvalidPaths() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testDeleteEmptyVolume() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testMkdirNonExistentVolumeBucket() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  @Override
+  @Test
+  @Ignore("HDDS-2939")
+  public void testMkdirNonExistentVolume() {
+    // ignore as this is not relevant to PREFIX layout changes
+  }
+
+  /**
+   * OFS: Test recursive listStatus on root and volume.
+   */
+  @Override
+  @Ignore("TODO:HDDS-4360")
+  public void testListStatusRootAndVolumeRecursive() throws IOException {
+  }
+
+  /**
+   * Rename fails (returns false) if (a) the parent of dst does not exist
+   * or (b) the parent is a file.
+   */
+  @Override
+  @Test
+  public void testRenameDestinationParentDoesntExist() throws Exception {
+    final String root = "/root_dir";
+    final String dir1 = root + "/dir1";
+    final String dir2 = dir1 + "/dir2";
+    final Path dir2SourcePath = new Path(getBucketPath() + dir2);
+    getFs().mkdirs(dir2SourcePath);
+    // (a) parent of dst does not exist.  /root_dir/b/c
+    final Path destinPath = new Path(getBucketPath()
+        + root + "/b/c");
+
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, destinPath));
+    // (b) parent of dst is a file. /root_dir/file1/c
+    Path filePath = new Path(getBucketPath() + root + "/file1");
+    ContractTestUtils.touch(getFs(), filePath);
+    Path newDestinPath = new Path(filePath, "c");
+    // rename should fail and return false
+    Assert.assertFalse(getFs().rename(dir2SourcePath, newDestinPath));
+  }
+
+  /**
+   * Cannot rename a directory to its own subdirectory.
+   */
+  @Override
+  @Test
+  public void testRenameDirToItsOwnSubDir() throws Exception {
+    final String root = "/root";
+    final String dir1 = root + "/dir1";
+    final Path dir1Path = new Path(getBucketPath() + dir1);
+    // Add a sub-dir1 to the directory to be moved.
+    final Path subDir1 = new Path(dir1Path, "sub_dir1");
+    getFs().mkdirs(subDir1);
+    LOG.info("Created dir1 {}", subDir1);
+
+    final Path sourceRoot = new Path(getBucketPath() + root);
+    LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
+    //  rename should fail and return false
+    Assert.assertFalse(getFs().rename(sourceRoot, subDir1));
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
index 19ff428..034cf1e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests creating files.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
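+  // JUnit creates a new instance per parameterized run; restart the shared
+  // cluster only when the requested layout differs from the active one.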
+  public ITestOzoneContractCreate(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractCreate.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
index 33e6260..1381a2c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering deletes.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractDelete(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractDelete.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
index 9d9aa56..04a3fb5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
@@ -19,28 +19,42 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Ozone contract tests covering getFileStatus.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractGetFileStatus
     extends AbstractContractGetFileStatusTest {
 
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractGetFileStatus(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class);
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractGetFileStatus.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -63,4 +77,9 @@
   protected Configuration createConfiguration() {
     return super.createConfiguration();
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
index 305164c..862b2b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Test dir operations on Ozone.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractMkdir(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractMkdir.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -46,4 +59,9 @@
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
index aa81965..83a6306 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
@@ -19,21 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests opening files.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractOpen extends AbstractContractOpenTest {
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractOpen(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractOpen.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -45,4 +59,9 @@
   protected AbstractFSContract createContract(Configuration conf) {
     return new OzoneContract(conf);
   }
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
index 3660d81..2fa1c64 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
@@ -19,22 +19,35 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract tests covering rename.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractRename(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractRename.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -47,4 +60,9 @@
     return new OzoneContract(conf);
   }
 
+
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
index c64dafa..5ca5bc3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
@@ -19,23 +19,36 @@
 package org.apache.hadoop.fs.ozone.contract;
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * Ozone contract test for ROOT directory operations.
  */
+@RunWith(Parameterized.class)
 public class ITestOzoneContractRootDir extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
-  public static void createCluster() throws IOException {
-    OzoneContract.createCluster();
+  private static boolean fsOptimizedServer;
+
+  public ITestOzoneContractRootDir(boolean fsoServer)
+      throws IOException {
+    if (fsOptimizedServer != fsoServer) {
+      setFsOptimizedServer(fsoServer);
+      ITestOzoneContractUtils.restartCluster(
+          fsOptimizedServer);
+    }
+  }
+
+  public static void setFsOptimizedServer(boolean fsOptimizedServer) {
+    ITestOzoneContractRootDir.fsOptimizedServer = fsOptimizedServer;
   }
 
   @AfterClass
@@ -48,4 +61,9 @@
     return new OzoneContract(conf);
   }
 
+  @Parameterized.Parameters
+  public static Collection data() {
+    return ITestOzoneContractUtils.getFsoCombinations();
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
index 2f22025..51a35ee 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
@@ -31,6 +31,7 @@
  * Ozone contract tests covering file seek.
  */
 public class ITestOzoneContractSeek extends AbstractContractSeekTest {
+
   @BeforeClass
   public static void createCluster() throws IOException {
     OzoneContract.createCluster();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
new file mode 100644
index 0000000..0fc23c3
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Utility class for Ozone-contract tests.
+ */
+public final class ITestOzoneContractUtils {
+
+  private ITestOzoneContractUtils() { }
+
+  private static List<Object> fsoCombinations = Arrays.asList(new Object[] {
+      // The FSO flag is a cluster-level, server-side configuration.
+      // If the cluster is configured with the SIMPLE metadata layout,
+      // non-FSO buckets will be created.
+      // If the cluster is configured with the PREFIX metadata layout,
+      // FSO buckets will be created.
+      // Presently, OzoneClient checks the bucket metadata and then invokes
+      // the FSO or non-FSO specific code, so it makes no sense to add
+      // client-side configs now. Once a client API to explicitly create an
+      // FSO or non-FSO bucket is provided, the contract tests can be
+      // refactored to take another parameter (fsoClient) which sets/unsets
+      // the client-side configs.
+      true, // Server is configured with the new layout (PREFIX)
+      // and new buckets will be operated on.
+      false // Server is configured with the old layout (SIMPLE)
+      // and old buckets will be operated on.
+  });
+
+  static List<Object> getFsoCombinations() {
+    return fsoCombinations;
+  }
+
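+  // The metadata layout is a server-side, cluster-wide setting, so switching
+  // between SIMPLE and PREFIX requires tearing the MiniOzoneCluster down and
+  // rebuilding it with the new configuration.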
+  public static void restartCluster(boolean fsOptimizedServer)
+      throws IOException {
+    OzoneContract.destroyCluster();
+    OzoneContract.initOzoneConfiguration(
+        fsOptimizedServer);
+    OzoneContract.createCluster();
+  }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 104b10c..925ef6a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -46,6 +46,8 @@
   private static MiniOzoneCluster cluster;
   private static final String CONTRACT_XML = "contract/ozone.xml";
 
+  private static boolean fsOptimizedServer;
+
   OzoneContract(Configuration conf) {
     super(conf);
     //insert the base features
@@ -63,6 +65,10 @@
     return path;
   }
 
+  public static void initOzoneConfiguration(boolean fsoServer) {
+    fsOptimizedServer = fsoServer;
+  }
+
   public static void createCluster() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     DatanodeRatisServerConfig ratisServerConfig =
@@ -79,6 +85,13 @@
 
     conf.addResource(CONTRACT_XML);
 
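+    // When an FSO server is requested, enable filesystem path handling and
+    // switch the OM metadata layout to PREFIX so buckets are created in the
+    // optimized (prefix-table) format.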
+    if (fsOptimizedServer) {
+      conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+          true);
+      conf.set(OMConfigKeys.OZONE_OM_METADATA_LAYOUT,
+          OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    }
+
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     try {
       cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
new file mode 100644
index 0000000..9283ee6
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -0,0 +1,1024 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneTestUtils;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneMultipartUpload;
+import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
+import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Verifies the S3 multipart upload client APIs against the prefix (FSO)
+ * metadata layout.
+ */
+public class TestOzoneClientMultipartUploadWithFSO {
+
+  private static ObjectStore store = null;
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneClient ozClient = null;
+
+  private static String scmId = UUID.randomUUID().toString();
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  /**
+   * Create a MiniOzoneCluster configured with the PREFIX (FSO) metadata
+   * layout for testing.
+   *
+   * @throws Exception
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
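+    // configureFSOptimizedPaths is a test helper that enables
+    // ozone.om.enable.filesystem.paths and applies the given metadata layout.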
+    startCluster(conf);
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() throws IOException {
+    shutdownCluster();
+  }
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * @param conf Configurations to start the cluster.
+   * @throws Exception
+   */
+  static void startCluster(OzoneConfiguration conf) throws Exception {
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .setTotalPipelineNumLimit(10)
+            .setScmId(scmId)
+            .build();
+    cluster.waitForClusterToBeReady();
+    ozClient = OzoneClientFactory.getRpcClient(conf);
+    store = ozClient.getObjectStore();
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  static void shutdownCluster() throws IOException {
+    if (ozClient != null) {
+      ozClient.close();
+    }
+
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testInitiateMultipartUploadWithReplicationInformationSet() throws
+          IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    Assert.assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    Assert.assertNotNull(multipartInfo.getUploadID());
+
+    // Call initiate multipart upload for the same key again, this should
+    // generate a new uploadID.
+    multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    Assert.assertNotNull(multipartInfo);
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    Assert.assertNotNull(multipartInfo.getUploadID());
+  }
+
+  @Test
+  public void testInitiateMultipartUploadWithDefaultReplication() throws
+          IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
+
+    Assert.assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    Assert.assertNotNull(multipartInfo.getUploadID());
+
+    // Call initiate multipart upload for the same key again, this should
+    // generate a new uploadID.
+    multipartInfo = bucket.initiateMultipartUpload(keyName);
+
+    Assert.assertNotNull(multipartInfo);
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    Assert.assertNotEquals(multipartInfo.getUploadID(), uploadID);
+    Assert.assertNotNull(multipartInfo.getUploadID());
+  }
+
+  @Test
+  public void testUploadPartWithNoOverride() throws IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String sampleData = "sample Value";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    Assert.assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    Assert.assertNotNull(multipartInfo.getUploadID());
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), 1, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    Assert.assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
+  }
+
+  @Test
+  public void testUploadPartOverrideWithRatis() throws IOException {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+    String sampleData = "sample Value";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            ReplicationType.RATIS, THREE);
+
+    Assert.assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    Assert.assertNotNull(multipartInfo.getUploadID());
+
+    int partNumber = 1;
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), partNumber, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    Assert.assertNotNull(commitUploadPartInfo);
+    String partName = commitUploadPartInfo.getPartName();
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
+
+    // Overwrite the part by creating a part key with the same part number.
+    sampleData = "sample Data Changed";
+    ozoneOutputStream = bucket.createMultipartKey(keyName,
+            sampleData.length(), partNumber, uploadID);
+    ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
+    ozoneOutputStream.close();
+
+    commitUploadPartInfo = ozoneOutputStream
+            .getCommitUploadPartInfo();
+
+    Assert.assertNotNull(commitUploadPartInfo);
+    Assert.assertNotNull(commitUploadPartInfo.getPartName());
+
+    // The new part name should differ from the old one.
+    Assert.assertNotEquals("Part names should be different", partName,
+            commitUploadPartInfo.getPartName());
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Initiate multipart upload
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    // Upload Parts
+    Map<Integer, String> partsMap = new TreeMap<>();
+    // Uploading part 1 with less than min size
+    String partName = uploadPart(bucket, keyName, uploadID, 1,
+            "data".getBytes(UTF_8));
+    partsMap.put(1, partName);
+
+    partName = uploadPart(bucket, keyName, uploadID, 2,
+            "data".getBytes(UTF_8));
+    partsMap.put(2, partName);
+
+    // Complete multipart upload
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
+          throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    // We have not uploaded any parts; passing a non-empty parts map should
+    // throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(1, UUID.randomUUID().toString());
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
+          throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+    // Passing an incorrect part name should throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(1, UUID.randomUUID().toString());
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testMultipartUploadWithMissingParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+            ONE);
+
+    uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+    // Passing an unknown part number should throw an INVALID_PART error.
+    TreeMap<Integer, String> partsMap = new TreeMap<>();
+    partsMap.put(3, "random");
+
+    OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+        () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+  }
+
+  @Test
+  public void testCommitPartAfterCompleteUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
+            STAND_ALONE, ONE);
+
+    Assert.assertNotNull(omMultipartInfo.getUploadID());
+
+    String uploadID = omMultipartInfo.getUploadID();
+
+    // upload part 1.
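+    // 5 MB matches OzoneConsts.OM_MULTIPART_MIN_SIZE, so part 1 is large
+    // enough to be accepted when the upload completes.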
+    byte[] data = generateData(5 * 1024 * 1024,
+            (byte) RandomUtils.nextLong());
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, 1, uploadID);
+    ozoneOutputStream.write(data, 0, data.length);
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+            ozoneOutputStream.getCommitUploadPartInfo();
+
+    // Do not close output stream for part 2.
+    ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, 2, omMultipartInfo.getUploadID());
+    ozoneOutputStream.write(data, 0, data.length);
+
+    Map<Integer, String> partsMap = new LinkedHashMap<>();
+    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
+            bucket.completeMultipartUpload(keyName,
+                    uploadID, partsMap);
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+
+    byte[] fileContent = new byte[data.length];
+    OzoneInputStream inputStream = bucket.readKey(keyName);
+    inputStream.read(fileContent);
+    StringBuilder sb = new StringBuilder(data.length);
+
+    // Combine the data of all committed parts and check that it matches
+    // the data read back for the key.
+    String part1 = new String(data, UTF_8);
+    sb.append(part1);
+    Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
+
+    try {
+      ozoneOutputStream.close();
+      Assert.fail("testCommitPartAfterCompleteUpload failed");
+    } catch (IOException ex) {
+      Assert.assertTrue(ex instanceof OMException);
+      Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+              ((OMException) ex).getResult());
+    }
+  }
+
+  @Test
+  public void testAbortUploadFail() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OzoneTestUtils.expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+        () -> bucket.abortMultipartUpload(keyName, "random"));
+  }
+
+  @Test
+  public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
+        STAND_ALONE, ONE);
+
+    Assert.assertNotNull(omMultipartInfo.getUploadID());
+
+    // Do not close output stream.
+    byte[] data = "data".getBytes(UTF_8);
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+        data.length, 1, omMultipartInfo.getUploadID());
+    ozoneOutputStream.write(data, 0, data.length);
+
+    // Abort before completing part upload.
+    bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID());
+
+    try {
+      ozoneOutputStream.close();
+      fail("testAbortUploadFailWithInProgressPartUpload failed");
+    } catch (IOException ex) {
+      assertTrue(ex instanceof OMException);
+      assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+          ((OMException) ex).getResult());
+    }
+  }
+
+  @Test
+  public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    bucket.abortMultipartUpload(keyName, uploadID);
+  }
+
+  @Test
+  public void testAbortUploadSuccessWithParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/";
+    String keyName = parentDir + UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    String partName = uploadPart(bucket, keyName, uploadID, 1,
+        "data".getBytes(UTF_8));
+
+    OMMetadataManager metadataMgr =
+        cluster.getOzoneManager().getMetadataManager();
+    String multipartKey = verifyUploadedPart(volumeName, bucketName, keyName,
+        uploadID, partName, metadataMgr);
+
+    bucket.abortMultipartUpload(keyName, uploadID);
+
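+    // After the abort, both the open-key entry and the multipart-info entry
+    // must be gone from the OM metadata tables.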
+    String multipartOpenKey =
+        getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
+            metadataMgr);
+    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartOpenKey);
+    OmMultipartKeyInfo omMultipartKeyInfo =
+        metadataMgr.getMultipartInfoTable().get(multipartKey);
+    Assert.assertNull(omKeyInfo);
+    Assert.assertNull(omMultipartKeyInfo);
+
+    // The deleteTable update happens via a batch operation
+    // (Table.putWithBatch()), which completes asynchronously, so no
+    // assertion is made on the deleted entries here.
+  }
+
+  @Test
+  public void testListMultipartUploadParts() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String parentDir = "a/b/c/d/e/f/";
+    String keyName = parentDir + "file-ABC";
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    Map<Integer, String> partsMap = new TreeMap<>();
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partName1);
+
+    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partName2);
+
+    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partName3);
+
+    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+        bucket.listParts(keyName, uploadID, 0, 3);
+
+    Assert.assertEquals(STAND_ALONE,
+        ozoneMultipartUploadPartListParts.getReplicationType());
+    Assert.assertEquals(3,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+
+    verifyPartNamesInDB(volumeName, bucketName, keyName, partsMap,
+        ozoneMultipartUploadPartListParts, uploadID);
+
+    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+  }
+
+  private void verifyPartNamesInDB(String volumeName, String bucketName,
+      String keyName, Map<Integer, String> partsMap,
+      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts,
+      String uploadID) throws IOException {
+
+    List<String> listPartNames = new ArrayList<>();
+    String keyPartName = verifyPartNames(partsMap, 0,
+        ozoneMultipartUploadPartListParts);
+    listPartNames.add(keyPartName);
+
+    keyPartName = verifyPartNames(partsMap, 1,
+        ozoneMultipartUploadPartListParts);
+    listPartNames.add(keyPartName);
+
+    keyPartName = verifyPartNames(partsMap, 2,
+        ozoneMultipartUploadPartListParts);
+    listPartNames.add(keyPartName);
+
+    OMMetadataManager metadataMgr =
+        cluster.getOzoneManager().getMetadataManager();
+    String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+    OmMultipartKeyInfo omMultipartKeyInfo =
+        metadataMgr.getMultipartInfoTable().get(multipartKey);
+    Assert.assertNotNull(omMultipartKeyInfo);
+
+    TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
+        omMultipartKeyInfo.getPartKeyInfoMap();
+    for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry :
+        partKeyInfoMap.entrySet()) {
+      OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
+      String partKeyName = partKeyInfo.getPartName();
+
+      // Reconstruct the expected part-name prefix from volume, bucket and
+      // key name.
+      String fullKeyPartName =
+          metadataMgr.getOzoneKey(volumeName, bucketName, keyName);
+
+      // Part name format in DB: <full ozone key> + clientID.
+      Assert.assertTrue("Invalid partKeyName format in DB: " + partKeyName
+              + ", expected name:" + fullKeyPartName,
+          partKeyName.startsWith(fullKeyPartName));
+
+      listPartNames.remove(partKeyName);
+    }
+
+    Assert.assertTrue("Wrong partKeyName format in DB!",
+        listPartNames.isEmpty());
+  }
+
+  private String verifyPartNames(Map<Integer, String> partsMap, int index,
+      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts) {
+
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(index).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(index)
+            .getPartName());
+
+    return ozoneMultipartUploadPartListParts.getPartInfoList().get(index)
+        .getPartName();
+  }
+
+  @Test
+  public void testListMultipartUploadPartsWithContinuation()
+      throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    Map<Integer, String> partsMap = new TreeMap<>();
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    String partName1 = uploadPart(bucket, keyName, uploadID, 1,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(1, partName1);
+
+    String partName2 = uploadPart(bucket, keyName, uploadID, 2,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(2, partName2);
+
+    String partName3 = uploadPart(bucket, keyName, uploadID, 3,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+    partsMap.put(3, partName3);
+
+    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+        bucket.listParts(keyName, uploadID, 0, 2);
+
+    Assert.assertEquals(STAND_ALONE,
+        ozoneMultipartUploadPartListParts.getReplicationType());
+
+    Assert.assertEquals(2,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(0).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
+            .getPartName());
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(1).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(1)
+            .getPartName());
+
+    // Get remaining
+    Assert.assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
+    ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID,
+        ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2);
+
+    Assert.assertEquals(1,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+    Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+            .getPartInfoList().get(0).getPartNumber()),
+        ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
+            .getPartName());
+
+    // No parts remain beyond this page, so isTruncated should be false.
+    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+
+  }
+
+  @Test
+  public void testListPartsInvalidPartMarker() throws Exception {
+    try {
+      String volumeName = UUID.randomUUID().toString();
+      String bucketName = UUID.randomUUID().toString();
+      String keyName = UUID.randomUUID().toString();
+
+      store.createVolume(volumeName);
+      OzoneVolume volume = store.getVolume(volumeName);
+      volume.createBucket(bucketName);
+      OzoneBucket bucket = volume.getBucket(bucketName);
+
+      bucket.listParts(keyName, "random", -1, 2);
+      Assert.fail("Should throw exception as partNumber is an invalid number!");
+    } catch (IllegalArgumentException ex) {
+      GenericTestUtils.assertExceptionContains("Should be greater than or "
+          + "equal to zero", ex);
+    }
+  }
+
+  @Test
+  public void testListPartsInvalidMaxParts() throws Exception {
+    try {
+      String volumeName = UUID.randomUUID().toString();
+      String bucketName = UUID.randomUUID().toString();
+      String keyName = UUID.randomUUID().toString();
+
+      store.createVolume(volumeName);
+      OzoneVolume volume = store.getVolume(volumeName);
+      volume.createBucket(bucketName);
+      OzoneBucket bucket = volume.getBucket(bucketName);
+
+      bucket.listParts(keyName, "random", 1, -1);
+      Assert.fail("Should throw exception as max parts is an invalid number!");
+    } catch (IllegalArgumentException ex) {
+      GenericTestUtils.assertExceptionContains("Max Parts Should be greater "
+          + "than zero", ex);
+    }
+  }
+
+  @Test
+  public void testListPartsWithPartMarkerGreaterThanPartCount()
+      throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+        ONE);
+    uploadPart(bucket, keyName, uploadID, 1,
+        generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
+
+    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
+        bucket.listParts(keyName, uploadID, 100, 2);
+
+    // Should return empty
+
+    Assert.assertEquals(0,
+        ozoneMultipartUploadPartListParts.getPartInfoList().size());
+    Assert.assertEquals(STAND_ALONE,
+        ozoneMultipartUploadPartListParts.getReplicationType());
+
+    // As there are no parts greater than the partNumberMarker, the list is
+    // not truncated and isTruncated should return false.
+    Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+
+  }
+
+  @Test
+  public void testListPartsWithInvalidUploadID() throws Exception {
+    OzoneTestUtils
+        .expectOmException(NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> {
+          String volumeName = UUID.randomUUID().toString();
+          String bucketName = UUID.randomUUID().toString();
+          String keyName = UUID.randomUUID().toString();
+
+          store.createVolume(volumeName);
+          OzoneVolume volume = store.getVolume(volumeName);
+          volume.createBucket(bucketName);
+          OzoneBucket bucket = volume.getBucket(bucketName);
+          // The returned list is irrelevant; listParts itself should throw.
+          bucket.listParts(keyName, "random", 100, 2);
+        });
+  }
+
+  @Test
+  public void testListMultipartUpload() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String dirName = "dir1/dir2/dir3";
+    String key1 = "dir1" + "/key1";
+    String key2 = "dir1/dir2" + "/key2";
+    String key3 = dirName + "/key3";
+    List<String> keys = new ArrayList<>();
+    keys.add(key1);
+    keys.add(key2);
+    keys.add(key3);
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    // Initiate multipart upload
+    String uploadID1 = initiateMultipartUpload(bucket, key1, STAND_ALONE,
+        ONE);
+    String uploadID2 = initiateMultipartUpload(bucket, key2, STAND_ALONE,
+        ONE);
+    String uploadID3 = initiateMultipartUpload(bucket, key3, STAND_ALONE,
+        ONE);
+
+    // Upload part 1 for each key; the part size does not matter when
+    // listing the uploads.
+    uploadPart(bucket, key1, uploadID1, 1, "data".getBytes(UTF_8));
+    uploadPart(bucket, key2, uploadID2, 1, "data".getBytes(UTF_8));
+    uploadPart(bucket, key3, uploadID3, 1, "data".getBytes(UTF_8));
+
+    OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1");
+    Assert.assertEquals(3, listMPUs.getUploads().size());
+    List<String> expectedList = new ArrayList<>(keys);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    listMPUs = bucket.listMultipartUploads("dir1/dir2");
+    Assert.assertEquals(2, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>();
+    expectedList.add(key2);
+    expectedList.add(key3);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3");
+    Assert.assertEquals(1, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>();
+    expectedList.add(key3);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    // partial key
+    listMPUs = bucket.listMultipartUploads("d");
+    Assert.assertEquals(3, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>(keys);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+
+    // empty prefix matches all uploads
+    listMPUs = bucket.listMultipartUploads("");
+    Assert.assertEquals(3, listMPUs.getUploads().size());
+    expectedList = new ArrayList<>(keys);
+    for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
+      expectedList.remove(mpu.getKeyName());
+    }
+    Assert.assertEquals(0, expectedList.size());
+  }
+
+  private String verifyUploadedPart(String volumeName, String bucketName,
+      String keyName, String uploadID, String partName,
+      OMMetadataManager metadataMgr) throws IOException {
+    String multipartOpenKey =
+        getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
+            metadataMgr);
+
+    String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+    OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable().get(multipartOpenKey);
+    OmMultipartKeyInfo omMultipartKeyInfo =
+        metadataMgr.getMultipartInfoTable().get(multipartKey);
+
+    Assert.assertNotNull(omKeyInfo);
+    Assert.assertNotNull(omMultipartKeyInfo);
+    Assert.assertEquals(OzoneFSUtils.getFileName(keyName),
+        omKeyInfo.getKeyName());
+    Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
+
+    TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
+        omMultipartKeyInfo.getPartKeyInfoMap();
+    for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry :
+        partKeyInfoMap.entrySet()) {
+      OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
+      OmKeyInfo currentKeyPartInfo =
+          OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+      Assert.assertEquals(keyName, currentKeyPartInfo.getKeyName());
+
+      // verify dbPartName
+      Assert.assertEquals(partName, partKeyInfo.getPartName());
+    }
+    return multipartKey;
+  }
+
+  private String getMultipartOpenKey(String multipartUploadID,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    long parentID = getParentID(volumeName, bucketName, keyName,
+        omMetadataManager);
+
+    return omMetadataManager.getMultipartKey(parentID,
+        fileName, multipartUploadID);
+  }
+
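+  // Walks the path components of the key to resolve the object ID of its
+  // immediate parent directory, as stored in the prefix (FSO) layout.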
+  private long getParentID(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    return OMFileRequest.getParentID(bucketId, pathComponents,
+        keyName, omMetadataManager);
+  }
+
+  private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
+      ReplicationType replicationType, ReplicationFactor replicationFactor)
+          throws Exception {
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+            replicationType, replicationFactor);
+
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertNotNull(uploadID);
+
+    return uploadID;
+  }
+
+  private String uploadPart(OzoneBucket bucket, String keyName, String
+      uploadID, int partNumber, byte[] data) throws Exception {
+
+    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+            data.length, partNumber, uploadID);
+    ozoneOutputStream.write(data, 0,
+            data.length);
+    ozoneOutputStream.close();
+
+    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+            ozoneOutputStream.getCommitUploadPartInfo();
+
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+    Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
+
+    return omMultipartCommitUploadPartInfo.getPartName();
+  }
+
+  private void completeMultipartUpload(OzoneBucket bucket, String keyName,
+      String uploadID, Map<Integer, String> partsMap) throws Exception {
+    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket
+            .completeMultipartUpload(keyName, uploadID, partsMap);
+
+    Assert.assertNotNull(omMultipartUploadCompleteInfo);
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), bucket
+            .getName());
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), bucket
+            .getVolumeName());
+    Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), keyName);
+    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
+  }
+
+  private byte[] generateData(int size, byte val) {
+    byte[] chars = new byte[size];
+    Arrays.fill(chars, val);
+    return chars;
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 91e187c..f94e47e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
@@ -47,24 +49,32 @@
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.Assert.fail;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.junit.rules.ExpectedException;
 
 /**
  * Test read retries from multiple nodes in the pipeline.
  */
+@RunWith(Parameterized.class)
 public class TestReadRetries {
 
   /**
@@ -76,24 +86,37 @@
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient ozClient = null;
-  private static ObjectStore store = null;
-  private static OzoneManager ozoneManager;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
+  private MiniOzoneCluster cluster = null;
+  private OzoneClient ozClient = null;
+  private ObjectStore store = null;
+  private OzoneManager ozoneManager;
+  private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
   private static final String SCM_ID = UUID.randomUUID().toString();
+  private String layoutVersion;
 
+  public TestReadRetries(String layoutVersion) {
+    this.layoutVersion = layoutVersion;
+  }
+
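+  // Each test is executed twice: once against the default (SIMPLE) metadata
+  // layout and once against the PREFIX (FSO) layout.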
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+            new Object[]{OMConfigKeys.OZONE_OM_METADATA_LAYOUT_DEFAULT },
+            new Object[]{OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX });
+  }
 
   /**
    * Create a MiniOzoneCluster for testing.
    * @throws Exception
    */
-  @BeforeClass
-  public static void init() throws Exception {
+  @Before
+  public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, layoutVersion);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(3)
         .setScmId(SCM_ID)
@@ -112,8 +135,8 @@
   /**
    * Close OzoneClient and shutdown MiniOzoneCluster.
    */
-  @AfterClass
-  public static void shutdown() throws IOException {
+  @After
+  public void shutdown() throws IOException {
     if(ozClient != null) {
       ozClient.close();
     }
@@ -140,7 +163,7 @@
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String keyName = UUID.randomUUID().toString();
+    String keyName = "a/b/c/" + UUID.randomUUID().toString();
 
     OzoneOutputStream out = bucket
         .createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS,
@@ -188,6 +211,10 @@
     cluster.shutdownHddsDatanode(datanodeDetails);
     // try to read, this should be successful
     readKey(bucket, keyName, value);
+
+    // read intermediate directory
+    verifyIntermediateDir(bucket, "a/b/c");
+
     // shutdown the second datanode
     datanodeDetails = datanodes.get(1);
     cluster.shutdownHddsDatanode(datanodeDetails);
@@ -210,6 +237,13 @@
     factory.releaseClient(clientSpi, false);
   }
 
+  private void verifyIntermediateDir(OzoneBucket bucket, String dir)
+      throws IOException {
+    OzoneFileStatus fileStatus = bucket.getFileStatus(dir);
+    Assert.assertTrue(fileStatus.isDirectory());
+    Assert.assertEquals(dir, fileStatus.getTrimmedName());
+  }
+
   private void readKey(OzoneBucket bucket, String keyName, String data)
       throws IOException {
     OzoneKey key = bucket.getKey(keyName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
index 7074041..129101c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
@@ -31,12 +31,15 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
+import java.util.ArrayList;
 
 /**
  * Test for HadoopDirTreeGenerator.
@@ -47,6 +50,8 @@
   private OzoneConfiguration conf = null;
   private MiniOzoneCluster cluster = null;
   private ObjectStore store = null;
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestHadoopDirTreeGenerator.class);
 
   @Before
   public void setup() {
@@ -74,7 +79,7 @@
    * @throws IOException
    */
   private void startCluster() throws Exception {
-    conf = new OzoneConfiguration();
+    conf = getOzoneConfiguration();
 
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
     cluster.waitForClusterToBeReady();
@@ -83,6 +88,10 @@
     store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
   }
 
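+  // Hook for subclasses (e.g. TestHadoopDirTreeGeneratorWithFSO) to supply
+  // a configuration with the prefix layout enabled.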
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @Test
   public void testNestedDirTreeGeneration() throws Exception {
     try {
@@ -103,6 +112,9 @@
               2, 4, 2);
       verifyDirTree("vol5", "bucket1", 5,
               4, 1, 0);
+      // The default listing page size is Constants.LISTING_PAGE_SIZE = 1024;
+      // a span of 1100 forces verification to cross a page boundary.
+      verifyDirTree("vol6", "bucket1", 2,
+              1, 1100, 0);
     } finally {
       shutdown();
     }
@@ -122,6 +134,7 @@
             fileCount + "", "-s", span + "", "-n", "1", "-r", rootPath,
                      "-g", perFileSizeInBytes + ""});
     // verify the directory structure
+    LOG.info("Started verifying the directory structure...");
     FileSystem fileSystem = FileSystem.get(URI.create(rootPath),
             conf);
     Path rootDir = new Path(rootPath.concat("/"));
@@ -149,6 +162,7 @@
       verifyActualSpan(expectedSpanCnt, fileStatuses);
     }
     int actualNumFiles = 0;
+    ArrayList<String> files = new ArrayList<>();
     for (FileStatus fileStatus : fileStatuses) {
       if (fileStatus.isDirectory()) {
         ++depth;
@@ -157,6 +171,12 @@
       } else {
         Assert.assertEquals("Mismatches file len",
                 perFileSizeInBytes, fileStatus.getLen());
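+        // Guard against duplicates across listing pages: with 1100 files in
+        // a directory the listing crosses the 1024-entry page boundary, and
+        // a paging bug could return the same entry twice.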
+        String fName = fileStatus.getPath().getName();
+        Assert.assertFalse("actualNumFiles:" + actualNumFiles +
+                        ", fName:" + fName + ", expectedFileCnt:" +
+                        expectedFileCnt + ", depth:" + depth,
+                files.contains(fName));
+        files.add(fName);
         actualNumFiles++;
       }
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java
new file mode 100644
index 0000000..c776cef
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+
+/**
+ * Test for HadoopDirTreeGenerator - prefix layout.
+ */
+public class TestHadoopDirTreeGeneratorWithFSO
+    extends TestHadoopDirTreeGenerator {
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    return conf;
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithLayout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithLayout.java
new file mode 100644
index 0000000..6c9a131
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMStartupWithLayout.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METADATA_LAYOUT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METADATA_LAYOUT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX;
+
+/**
+ * Verifies OM startup with different layouts.
+ */
+public class TestOMStartupWithLayout {
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = Timeout.seconds(300);
+
+  private static MiniOzoneCluster cluster;
+
+  @BeforeClass
+  public static void startClusterWithSimpleLayout() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    String omId = UUID.randomUUID().toString();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf, true,
+        OZONE_OM_METADATA_LAYOUT_DEFAULT);
+    cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId)
+        .setScmId(scmId).setOmId(omId).build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testWithDifferentClusterLayout() throws Exception {
+    OzoneConfiguration conf = cluster.getOzoneManager().getConfiguration();
+
+    // create a volume and a bucket with default(SIMPLE) metadata format.
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    verifyBucketLayout(bucket, OZONE_OM_METADATA_LAYOUT_DEFAULT, false);
+
+    cluster.getOzoneManager().stop();
+
+    // case-1) Configured cluster layout as PREFIX. Bucket exists with SIMPLE
+    // layout format. OM startup should fail.
+    conf.set(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_PREFIX);
+    verifyOMStartupFailure(OZONE_OM_METADATA_LAYOUT_PREFIX);
+    verifyOMRestartFailure(OZONE_OM_METADATA_LAYOUT_PREFIX);
+
+    // case-2) Configured cluster layout as SIMPLE. Bucket exists with SIMPLE
+    // layout format. OM startup should be successful.
+    conf.set(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_DEFAULT);
+    // ensure everything works again with SIMPLE layout format
+    cluster.getOzoneManager().restart();
+    OzoneBucket bucket2 = TestDataUtil.createVolumeAndBucket(cluster);
+    verifyBucketLayout(bucket2, OZONE_OM_METADATA_LAYOUT_DEFAULT, false);
+
+    // Cleanup buckets so that the cluster can be started with PREFIX
+    OzoneClient client = cluster.getClient();
+    OzoneVolume volume =
+        client.getObjectStore().getVolume(bucket.getVolumeName());
+    OzoneVolume volume2 =
+        client.getObjectStore().getVolume(bucket2.getVolumeName());
+    volume.deleteBucket(bucket.getName());
+    volume2.deleteBucket(bucket2.getName());
+
+    // case-3) Configured cluster layout as PREFIX and ENABLE_FSPATH=false.
+    // OM startup should fail as this is INVALID config.
+    cluster.getOzoneManager().stop();
+    conf.set(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_PREFIX);
+    conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    verifyOmStartWithInvalidConfig(OZONE_OM_METADATA_LAYOUT_PREFIX);
+    verifyOmRestartWithInvalidConfig(OZONE_OM_METADATA_LAYOUT_PREFIX);
+
+    // case-4) Configured cluster layout as INVALID.
+    // OM startup should fail as this is INVALID config.
+    conf.set(OZONE_OM_METADATA_LAYOUT, "INVALID");
+    verifyOmStartWithInvalidConfig("INVALID");
+    verifyOmRestartWithInvalidConfig("INVALID");
+
+    // case-5) Configured cluster layout as PREFIX and ENABLE_FSPATH=true.
+    // No buckets. OM startup should be successful.
+    conf.set(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_PREFIX);
+    conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+    cluster.getOzoneManager().restart();
+    OzoneBucket bucket3 = TestDataUtil.createVolumeAndBucket(cluster);
+    verifyBucketLayout(bucket3, OZONE_OM_METADATA_LAYOUT_PREFIX, true);
+
+    // case-6) Configured cluster layout as SIMPLE. Bucket exists with PREFIX
+    // layout format. OM startup should fail.
+    conf.set(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_DEFAULT);
+    cluster.getOzoneManager().stop();
+    verifyOMStartupFailure(OZONE_OM_METADATA_LAYOUT_DEFAULT);
+    verifyOMRestartFailure(OZONE_OM_METADATA_LAYOUT_DEFAULT);
+  }
+
+  private void verifyBucketLayout(OzoneBucket bucket, String metadataLayout,
+      boolean isFSOBucket) {
+    Assert.assertNotNull(bucket);
+    Assert.assertEquals(2, bucket.getMetadata().size());
+    Assert.assertEquals(isFSOBucket,
+        OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata()));
+    Assert.assertEquals(metadataLayout,
+        bucket.getMetadata().get(OZONE_OM_METADATA_LAYOUT));
+  }
+
+  private void verifyOMStartupFailure(String clusterLayout) {
+    try {
+      cluster.getOzoneManager().start();
+      Assert.fail("Should fail OM startup in " + clusterLayout + " layout");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Failed to start OM in " + clusterLayout + " layout format",
+          ioe);
+    }
+    cluster.getOzoneManager().stop();
+  }
+
+  private void verifyOMRestartFailure(String clusterLayout) {
+    try {
+      cluster.getOzoneManager().restart();
+      Assert.fail("Should fail OM startup in " + clusterLayout + " layout");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Failed to start OM in " + clusterLayout + " layout format",
+          ioe);
+    }
+    cluster.getOzoneManager().stop();
+  }
+
+  private void verifyOmStartWithInvalidConfig(String clusterLayout)
+      throws IOException {
+    try {
+      cluster.getOzoneManager().start();
+      Assert.fail("Should fail OM startup in " + clusterLayout + " layout");
+    } catch (IllegalArgumentException iae) {
+      GenericTestUtils.assertExceptionContains(
+          "Failed to start OM in " + clusterLayout + " layout format", iae);
+    }
+    cluster.getOzoneManager().stop();
+  }
+
+  private void verifyOmRestartWithInvalidConfig(String clusterLayout)
+      throws IOException {
+    try {
+      cluster.getOzoneManager().restart();
+      Assert.fail("Should fail OM startup in " + clusterLayout + " layout");
+    } catch (IllegalArgumentException iae) {
+      GenericTestUtils.assertExceptionContains(
+          "Failed to start OM in " + clusterLayout + " layout format", iae);
+    }
+    cluster.getOzoneManager().stop();
+  }
+}
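
The startup cases above boil down to a compatibility check between the configured layout and the layout recorded on existing buckets. A hedged sketch of that check (names are illustrative, not the actual OM code; the exception types follow the assertions in this test):

    import java.io.IOException;
    import java.util.List;

    // Illustrative only: mirrors the behavior asserted by the cases above.
    static void verifyMetadataLayout(String configuredLayout,
        boolean fsPathsEnabled, List<String> existingBucketLayouts)
        throws IOException {
      boolean valid = "SIMPLE".equals(configuredLayout)
          || ("PREFIX".equals(configuredLayout) && fsPathsEnabled);
      if (!valid) {
        // unknown layout value, or PREFIX without filesystem paths enabled
        throw new IllegalArgumentException("Failed to start OM in "
            + configuredLayout + " layout format");
      }
      for (String bucketLayout : existingBucketLayouts) {
        if (!bucketLayout.equals(configuredLayout)) {
          // existing buckets were created with a different layout
          throw new IOException("Failed to start OM in "
              + configuredLayout + " layout format");
        }
      }
    }
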
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
new file mode 100644
index 0000000..a50c213
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -0,0 +1,722 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.ozone.OzoneFileSystem;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.AfterClass;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * Tests to verify object store operations with the prefix (FSO) layout enabled.
+ */
+public class TestObjectStoreWithFSO {
+
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+  private static String volumeName;
+  private static String bucketName;
+  private static FileSystem fs;
+
+  @Rule
+  public Timeout timeout = Timeout.seconds(1200);
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    TestOMRequestUtils.configureFSOptimizedPaths(conf,
+            true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+            .setClusterId(clusterId)
+            .setScmId(scmId)
+            .setOmId(omId)
+            .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+            OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+            bucket.getVolumeName());
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operations.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    deleteRootDir();
+  }
+
+  /**
+   * Cleanup files and directories.
+   *
+   * @throws IOException DB failure
+   */
+  private void deleteRootDir() throws IOException {
+    Path root = new Path("/");
+    FileStatus[] fileStatuses = fs.listStatus(root);
+
+    if (fileStatuses == null) {
+      return;
+    }
+
+    for (FileStatus fStatus : fileStatuses) {
+      fs.delete(fStatus.getPath(), true);
+    }
+
+    fileStatuses = fs.listStatus(root);
+    if (fileStatuses != null) {
+      Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
+    }
+  }
+
+  @Test
+  public void testCreateKey() throws Exception {
+    String parent = "a/b/c/";
+    String file = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + file;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertEquals(volumeName, ozoneVolume.getName());
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, ozoneBucket.getName());
+
+    Table<String, OmKeyInfo> openFileTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    // before file creation
+    verifyKeyInFileTable(openFileTable, file, 0, true);
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    KeyOutputStream keyOutputStream =
+            (KeyOutputStream) ozoneOutputStream.getOutputStream();
+    long clientID = keyOutputStream.getClientID();
+
+    OmDirectoryInfo dirPathC = getDirInfo(parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), false);
+
+    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
+            data.length());
+    ozoneOutputStream.close();
+
+    Table<String, OmKeyInfo> fileTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+    // After closing the file, its entry should be removed from openFileTable
+    // and added to fileTable.
+    verifyKeyInFileTable(fileTable, file, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), true);
+
+    ozoneBucket.deleteKey(key);
+
+    // after key delete
+    verifyKeyInFileTable(fileTable, file, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openFileTable, clientID, file,
+            dirPathC.getObjectID(), true);
+  }
+
+  @Test
+  public void testLookupKey() throws Exception {
+    String parent = "a/b/c/";
+    String fileName = "key" + RandomStringUtils.randomNumeric(5);
+    String key = parent + fileName;
+
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertEquals(volumeName, ozoneVolume.getName());
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, ozoneBucket.getName());
+
+    Table<String, OmKeyInfo> openFileTable =
+            cluster.getOzoneManager().getMetadataManager().getOpenKeyTable();
+
+    String data = "random data";
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key,
+            data.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
+            new HashMap<>());
+
+    KeyOutputStream keyOutputStream =
+            (KeyOutputStream) ozoneOutputStream.getOutputStream();
+    long clientID = keyOutputStream.getClientID();
+
+    OmDirectoryInfo dirPathC = getDirInfo(parent);
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+
+    // after file creation
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), false);
+
+    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0,
+            data.length());
+
+    // open key
+    try {
+      ozoneBucket.getKey(key);
+      fail("Should throw exception as fileName is not visible and its still " +
+              "open for writing!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
+    }
+
+    ozoneOutputStream.close();
+
+    OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
+    Assert.assertEquals(key, keyDetails.getName());
+
+    Table<String, OmKeyInfo> fileTable =
+            cluster.getOzoneManager().getMetadataManager().getKeyTable();
+
+    // When closing the key, its entry should be removed from openFileTable
+    // and added to fileTable.
+    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), true);
+
+    ozoneBucket.deleteKey(key);
+
+    // get deleted key
+    try {
+      ozoneBucket.getKey(key);
+      fail("Should throw exception as fileName not exists!");
+    } catch (OMException ome) {
+      // expected
+      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
+    }
+
+    // after key delete
+    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
+    verifyKeyInOpenFileTable(openFileTable, clientID, fileName,
+            dirPathC.getObjectID(), true);
+  }
+
+  /**
+   * Verify listKeys at different levels.
+   *
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |              |                       |
+   *     b1             b2                      b3
+   *    -----           --------               ----------
+   *   |      |        |    |   |             |    |     |
+   *  c1     c2        d1   d2  d3             e1   e2   e3
+   *  |      |         |    |   |              |    |    |
+   * c1.tx  c2.tx   d11.tx  | d31.tx           |    |    e31.tx
+   *                      --------             |   e21.tx
+   *                     |        |            |
+   *                   d21.tx   d22.tx        e11.tx
+   *
+   * Above is the FS tree structure.
+   */
+  @Test
+  public void testListKeysAtDifferentLevels() throws Exception {
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertEquals(volumeName, ozoneVolume.getName());
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, ozoneBucket.getName());
+
+    String keyc1 = "/a/b1/c1/c1.tx";
+    String keyc2 = "/a/b1/c2/c2.tx";
+
+    String keyd13 = "/a/b2/d1/d11.tx";
+    String keyd21 = "/a/b2/d2/d21.tx";
+    String keyd22 = "/a/b2/d2/d22.tx";
+    String keyd31 = "/a/b2/d3/d31.tx";
+
+    String keye11 = "/a/b3/e1/e11.tx";
+    String keye21 = "/a/b3/e2/e21.tx";
+    String keye31 = "/a/b3/e3/e31.tx";
+
+    LinkedList<String> keys = new LinkedList<>();
+    keys.add(keyc1);
+    keys.add(keyc2);
+
+    keys.add(keyd13);
+    keys.add(keyd21);
+    keys.add(keyd22);
+    keys.add(keyd31);
+
+    keys.add(keye11);
+    keys.add(keye21);
+    keys.add(keye31);
+
+    createKeys(ozoneBucket, keys);
+
+    // Root level listing keys
+    Iterator<? extends OzoneKey> ozoneKeyIterator =
+        ozoneBucket.listKeys(null, null);
+    verifyFullTreeStructure(ozoneKeyIterator);
+
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/", null);
+    verifyFullTreeStructure(ozoneKeyIterator);
+
+    LinkedList<String> expectedKeys;
+
+    // Intermediate level keyPrefix - 2nd level
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a///b2///", null);
+    expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/b2/");
+    expectedKeys.add("a/b2/d1/");
+    expectedKeys.add("a/b2/d2/");
+    expectedKeys.add("a/b2/d3/");
+    expectedKeys.add("a/b2/d1/d11.tx");
+    expectedKeys.add("a/b2/d2/d21.tx");
+    expectedKeys.add("a/b2/d2/d22.tx");
+    expectedKeys.add("a/b2/d3/d31.tx");
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+
+    // Intermediate level keyPrefix - 3rd level
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/b2/d1", null);
+    expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/b2/d1/");
+    expectedKeys.add("a/b2/d1/d11.tx");
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+
+    // Boundary of a level
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/b2/d2", "a/b2/d2/d21.tx");
+    expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/b2/d2/d22.tx");
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+
+    // Boundary case - last node in the depth-first-traversal
+    ozoneKeyIterator =
+        ozoneBucket.listKeys("a/b3/e3", "a/b3/e3/e31.tx");
+    expectedKeys = new LinkedList<>();
+    checkKeyList(ozoneKeyIterator, expectedKeys);
+  }
+
+  private void verifyFullTreeStructure(Iterator<? extends OzoneKey> keyItr) {
+    LinkedList<String> expectedKeys = new LinkedList<>();
+    expectedKeys.add("a/");
+    expectedKeys.add("a/b1/");
+    expectedKeys.add("a/b2/");
+    expectedKeys.add("a/b3/");
+    expectedKeys.add("a/b1/c1/");
+    expectedKeys.add("a/b1/c2/");
+    expectedKeys.add("a/b1/c1/c1.tx");
+    expectedKeys.add("a/b1/c2/c2.tx");
+    expectedKeys.add("a/b2/d1/");
+    expectedKeys.add("a/b2/d2/");
+    expectedKeys.add("a/b2/d3/");
+    expectedKeys.add("a/b2/d1/d11.tx");
+    expectedKeys.add("a/b2/d2/d21.tx");
+    expectedKeys.add("a/b2/d2/d22.tx");
+    expectedKeys.add("a/b2/d3/d31.tx");
+    expectedKeys.add("a/b3/e1/");
+    expectedKeys.add("a/b3/e2/");
+    expectedKeys.add("a/b3/e3/");
+    expectedKeys.add("a/b3/e1/e11.tx");
+    expectedKeys.add("a/b3/e2/e21.tx");
+    expectedKeys.add("a/b3/e3/e31.tx");
+    checkKeyList(keyItr, expectedKeys);
+  }
+
+  @Test
+  public void testListKeysWithNotNormalizedPath() throws Exception {
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
+    Assert.assertEquals(volumeName, ozoneVolume.getName());
+    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, ozoneBucket.getName());
+
+    String key1 = "/dir1///dir2/file1/";
+    String key2 = "/dir1///dir2/file2/";
+    String key3 = "/dir1///dir2/file3/";
+
+    LinkedList<String> keys = new LinkedList<>();
+    keys.add("dir1/");
+    keys.add("dir1/dir2/");
+    keys.add(OmUtils.normalizeKey(key1, false));
+    keys.add(OmUtils.normalizeKey(key2, false));
+    keys.add(OmUtils.normalizeKey(key3, false));
+
+    int length = 10;
+    byte[] input = new byte[length];
+    Arrays.fill(input, (byte)96);
+
+    createKey(ozoneBucket, key1, 10, input);
+    createKey(ozoneBucket, key2, 10, input);
+    createKey(ozoneBucket, key3, 10, input);
+
+    // Iterator with key name as prefix.
+
+    Iterator<? extends OzoneKey> ozoneKeyIterator =
+            ozoneBucket.listKeys("/dir1//", null);
+
+    checkKeyList(ozoneKeyIterator, keys);
+
+    // Iterator with normalized key prefix.
+    ozoneKeyIterator =
+            ozoneBucket.listKeys("dir1/");
+
+    checkKeyList(ozoneKeyIterator, keys);
+
+    // Iterator with key name as previous key.
+    ozoneKeyIterator = ozoneBucket.listKeys(null,
+            "/dir1///dir2/file1/");
+
+    // Remove keys that sort at or before dir1/dir2/file1.
+    keys.remove("dir1/");
+    keys.remove("dir1/dir2/");
+    keys.remove("dir1/dir2/file1");
+
+    checkKeyList(ozoneKeyIterator, keys);
+
+    // Iterator with normalized key as previous key.
+    ozoneKeyIterator = ozoneBucket.listKeys(null,
+            OmUtils.normalizeKey(key1, false));
+
+    checkKeyList(ozoneKeyIterator, keys);
+  }
+
+  private void checkKeyList(Iterator<? extends OzoneKey> ozoneKeyIterator,
+      List<String> keys) {
+
+    LinkedList<String> outputKeys = new LinkedList<>();
+    while (ozoneKeyIterator.hasNext()) {
+      OzoneKey ozoneKey = ozoneKeyIterator.next();
+      outputKeys.add(ozoneKey.getName());
+    }
+
+    Assert.assertEquals(keys, outputKeys);
+  }
+
+  private void createKeys(OzoneBucket ozoneBucket, List<String> keys)
+      throws Exception {
+    int length = 10;
+    byte[] input = new byte[length];
+    Arrays.fill(input, (byte) 96);
+    for (String key : keys) {
+      createKey(ozoneBucket, key, 10, input);
+    }
+  }
+
+  private void createKey(OzoneBucket ozoneBucket, String key, int length,
+      byte[] input) throws Exception {
+
+    OzoneOutputStream ozoneOutputStream =
+            ozoneBucket.createKey(key, length);
+
+    ozoneOutputStream.write(input);
+    ozoneOutputStream.write(input, 0, 10);
+    ozoneOutputStream.close();
+
+    // Read the key with given key name.
+    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
+    byte[] read = new byte[length];
+    ozoneInputStream.read(read, 0, length);
+    ozoneInputStream.close();
+
+    String inputString = new String(input, StandardCharsets.UTF_8);
+    Assert.assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
+
+    // Read using filesystem.
+    String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME,
+            bucketName, volumeName);
+    OzoneFileSystem o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath),
+            conf);
+    FSDataInputStream fsDataInputStream = o3fs.open(new Path(key));
+    read = new byte[length];
+    fsDataInputStream.read(read, 0, length);
+    fsDataInputStream.close();
+
+    Assert.assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
+  }
+
+  @Test
+  public void testRenameKey() throws IOException {
+    String fromKeyName = UUID.randomUUID().toString();
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, fromKeyName, value);
+
+    // Rename to empty string should fail.
+    String toKeyName = "";
+    try {
+      bucket.renameKey(fromKeyName, toKeyName);
+      fail("Rename to empty string should fail!");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+              ome.getResult());
+    }
+
+    toKeyName = UUID.randomUUID().toString();
+    bucket.renameKey(fromKeyName, toKeyName);
+
+    // Lookup for old key should fail.
+    try {
+      bucket.getKey(fromKeyName);
+      fail("Lookup for old from key name should fail!");
+    } catch (OMException ome) {
+      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
+    }
+
+    OzoneKey key = bucket.getKey(toKeyName);
+    Assert.assertEquals(toKeyName, key.getName());
+  }
+
+  @Test
+  public void testKeyRenameWithSubDirs() throws Exception {
+    String keyName1 = "dir1/dir2/file1";
+    String keyName2 = "dir1/dir2/file2";
+
+    String newKeyName1 = "dir1/key1";
+    String newKeyName2 = "dir1/key2";
+
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, keyName1, value);
+    createTestKey(bucket, keyName2, value);
+
+    bucket.renameKey(keyName1, newKeyName1);
+    bucket.renameKey(keyName2, newKeyName2);
+
+    // new key should exist
+    Assert.assertEquals(newKeyName1, bucket.getKey(newKeyName1).getName());
+    Assert.assertEquals(newKeyName2, bucket.getKey(newKeyName2).getName());
+
+    // old key should not exist
+    assertKeyRenamedEx(bucket, keyName1);
+    assertKeyRenamedEx(bucket, keyName2);
+  }
+
+  @Test
+  public void testRenameToAnExistingKey() throws Exception {
+    String keyName1 = "dir1/dir2/file1";
+    String keyName2 = "dir1/dir2/file2";
+
+    String value = "sample value";
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    createTestKey(bucket, keyName1, value);
+    createTestKey(bucket, keyName2, value);
+
+    try {
+      bucket.renameKey(keyName1, keyName2);
+      fail("Should throw exception as destin key already exists!");
+    } catch (OMException e) {
+      Assert.assertEquals(KEY_ALREADY_EXISTS, e.getResult());
+    }
+  }
+
+  private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
+          throws Exception {
+    try {
+      bucket.getKey(keyName);
+      fail("Should throw KeyNotFound as the key got renamed!");
+    } catch (OMException ome) {
+      Assert.assertEquals(KEY_NOT_FOUND, ome.getResult());
+    }
+  }
+
+  private void createTestKey(OzoneBucket bucket, String keyName,
+      String keyValue) throws IOException {
+    OzoneOutputStream out = bucket.createKey(keyName,
+            keyValue.getBytes(StandardCharsets.UTF_8).length, STAND_ALONE,
+            ONE, new HashMap<>());
+    out.write(keyValue.getBytes(StandardCharsets.UTF_8));
+    out.close();
+    OzoneKey key = bucket.getKey(keyName);
+    Assert.assertEquals(keyName, key.getName());
+  }
+
+  private OmDirectoryInfo getDirInfo(String parentKey) throws Exception {
+    OMMetadataManager omMetadataManager =
+            cluster.getOzoneManager().getMetadataManager();
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(parentKey, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (String pathElement : pathComponents) {
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  private void verifyKeyInFileTable(Table<String, OmKeyInfo> fileTable,
+      String fileName, long parentID, boolean isEmpty) throws IOException {
+
+    String dbFileKey = parentID + OM_KEY_PREFIX + fileName;
+    OmKeyInfo omKeyInfo = fileTable.get(dbFileKey);
+    if (isEmpty) {
+      Assert.assertNull("Table is not empty!", omKeyInfo);
+    } else {
+      Assert.assertNotNull("Table is empty!", omKeyInfo);
+      // DB file key format is <parentID>/<fileName>; verify that the stored
+      // keyName and parentObjectID match the expected file.
+      Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(),
+              fileName, omKeyInfo.getKeyName());
+      Assert.assertEquals("Invalid Key", parentID,
+              omKeyInfo.getParentObjectID());
+    }
+  }
+
+  private void verifyKeyInOpenFileTable(Table<String, OmKeyInfo> openFileTable,
+      long clientID, String fileName, long parentID, boolean isEmpty)
+          throws IOException, TimeoutException, InterruptedException {
+    String dbOpenFileKey =
+            parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientID;
+
+    if (isEmpty) {
+      // wait for DB updates
+      GenericTestUtils.waitFor(() -> {
+        try {
+          OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
+          return omKeyInfo == null;
+        } catch (IOException e) {
+          Assert.fail("DB failure!");
+          return false;
+        }
+
+      }, 1000, 120000);
+    } else {
+      OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
+      Assert.assertNotNull("Table is empty!", omKeyInfo);
+      // DB open-file key format is <parentID>/<fileName>/<clientID>; verify
+      // that the stored keyName and parentObjectID match the expected file.
+      Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(),
+              fileName, omKeyInfo.getKeyName());
+      Assert.assertEquals("Invalid Key", parentID,
+              omKeyInfo.getParentObjectID());
+    }
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+}
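
The helpers above make the prefix-layout key shapes explicit: DB keys are derived from the parent directory's object id rather than from the full path, which is what lets a directory be renamed or deleted without rewriting every key in its sub-tree. For quick reference (variables as in the helpers above):

    // FileTable key, as in verifyKeyInFileTable: <parentID>/<fileName>
    String dbFileKey = parentID + OM_KEY_PREFIX + fileName;

    // OpenFileTable key, as in verifyKeyInOpenFileTable:
    // <parentID>/<fileName>/<clientID>
    String dbOpenFileKey =
        parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientID;
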
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java
new file mode 100644
index 0000000..bcbea56
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
+
+/**
+ * Test recursive acl checks for delete and rename for FSO buckets.
+ */
+public class TestRecursiveAclWithFSO {
+
+  @Rule
+  public Timeout timeout = Timeout.seconds(120);
+
+  private MiniOzoneCluster cluster;
+
+  private final UserGroupInformation adminUser =
+      UserGroupInformation.createUserForTesting("om", new String[] {"ozone"});
+  private final UserGroupInformation user1 = UserGroupInformation
+      .createUserForTesting("user1", new String[] {"test1"});
+  private final UserGroupInformation user2 = UserGroupInformation
+      .createUserForTesting("user2", new String[] {"test2"});
+
+  @Before
+  public void init() throws Exception {
+    // loginUser is the user running this test.
+    // Implication: loginUser is automatically added to the OM admin list.
+    UserGroupInformation.setLoginUser(adminUser);
+    // ozone.acl.enabled = true
+    // start a cluster
+    startCluster();
+  }
+
+  @Test
+  public void testKeyDeleteAndRenameWithoutPermission() throws Exception {
+
+    List<String> keys = new ArrayList<>();
+    // Create volumes with user1
+
+    OzoneClient client = cluster.getClient();
+    ObjectStore objectStore = client.getObjectStore();
+
+    /* r = READ, w = WRITE, c = CREATE, d = DELETE
+       l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */
+    String aclWorldAll = "world::a";
+    createVolumeWithOwnerAndAcl(objectStore, "volume1", "user1", aclWorldAll);
+
+    // Login as user1, create directories and keys
+    UserGroupInformation.setLoginUser(user1);
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+
+    OzoneVolume volume = objectStore.getVolume("volume1");
+
+    BucketArgs omBucketArgs =
+        BucketArgs.newBuilder().setStorageType(StorageType.DISK).build();
+
+    // create bucket with user1
+    volume.createBucket("bucket1", omBucketArgs);
+    setBucketAcl(objectStore, volume.getName(), "bucket1", aclWorldAll);
+    OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+
+    /*
+     *                       buck-1
+     *                        |
+     *                        a
+     *                        |
+     *          ------------------------------------
+     *         |           |              |        |
+     *         b1          b2             b3      file1
+     *       -----       ------           -----
+     *       |    |      |    |          |    |
+     *      c1   c2     d1   d2          e1   e2
+     *       |    |      |    |           |    |
+     *       f1   f2     f3  --------     f5   f6
+     *                      |        |
+     *                    d21        file2
+     *                     |
+     *                     f4
+     *
+     *     Test case 1:
+     *     Remove delete acl from file2.
+     *     Try deleting b2.
+     *
+     *     Test case 2:
+     *     Remove delete acl from dir c2.
+     *     Try deleting b1.
+     *
+     *     Test case 3:
+     *     Try deleting b3.
+     */
+
+    String keyf1 = "a/b1/c1/f1";
+    String keyf2 = "a/b1/c2/f2";
+    String keyf3 = "a/b2/d1/f3";
+    String keyf4 = "a/b2/d2/d21/f4";
+    String keyf5 = "/a/b3/e1/f5";
+    String keyf6 = "/a/b3/e2/f6";
+
+    String file1 = "a/" + "file" + RandomStringUtils.randomNumeric(5);
+    String file2 = "a/b2/d2/" + "file" + RandomStringUtils.randomNumeric(5);
+
+    keys.add(keyf1);
+    keys.add(keyf2);
+    keys.add(keyf3);
+    keys.add(keyf4);
+    keys.add(keyf5);
+    keys.add(keyf6);
+    keys.add(file1);
+    keys.add(file2);
+
+    createKeys(objectStore, ozoneBucket, keys);
+
+    // Test case 1
+    // Remove acls from file2
+    // Delete/Rename on directory a/b2 should throw permission denied
+    // (since file2 is a child)
+    removeAclsFromKey(objectStore, ozoneBucket, file2);
+
+    UserGroupInformation.setLoginUser(user2);
+    client = cluster.getClient();
+    objectStore = client.getObjectStore();
+    volume = objectStore.getVolume("volume1");
+    ozoneBucket = volume.getBucket("bucket1");
+
+    // perform delete
+    try {
+      ozoneBucket.deleteDirectory("a/b2", true);
+      Assert.fail("Should throw permission denied !");
+    } catch (OMException ome) {
+      // expect permission error
+      Assert.assertEquals("Permission check failed",
+          OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
+    }
+
+    // perform rename
+    try {
+      ozoneBucket.renameKey("a/b2", "a/b2_renamed");
+      Assert.fail("Should throw permission denied !");
+    } catch (OMException ome) {
+      // expect permission error
+      Assert.assertEquals("Permission check failed",
+          OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
+    }
+
+    // Test case 2
+    // Remove acl from directory c2, delete/rename a/b1 should throw
+    // permission denied since c2 is a subdirectory
+
+    UserGroupInformation.setLoginUser(user1);
+    removeAclsFromKey(objectStore, ozoneBucket, "a/b1/c2");
+
+    UserGroupInformation.setLoginUser(user2);
+    // perform delete
+    try {
+      ozoneBucket.deleteDirectory("a/b1", true);
+      Assert.fail("Should throw permission denied !");
+    } catch (OMException ome) {
+      // expect permission error
+      Assert.assertEquals("Permission check failed",
+          OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
+    }
+
+    // perform rename
+    try {
+      ozoneBucket.renameKey("a/b1", "a/b1_renamed");
+      Assert.fail("Should throw permission denied !");
+    } catch (OMException ome) {
+      // expect permission error
+      Assert.assertEquals("Permission check failed",
+          OMException.ResultCodes.PERMISSION_DENIED, ome.getResult());
+    }
+
+    // Test case 3
+    // Deleting b3 shouldn't throw an exception because acls have not been
+    // removed from its sub-paths.
+    ozoneBucket.deleteDirectory("a/b3", true);
+  }
+
+  private void removeAclsFromKey(ObjectStore objectStore,
+      OzoneBucket ozoneBucket, String key) throws IOException {
+    OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder().setKeyName(key)
+        .setBucketName(ozoneBucket.getName())
+        .setVolumeName(ozoneBucket.getVolumeName())
+        .setStoreType(OzoneObj.StoreType.OZONE)
+        .setResType(OzoneObj.ResourceType.KEY).build();
+    List<OzoneAcl> aclList1 = objectStore.getAcl(ozoneObj);
+    for (OzoneAcl acl : aclList1) {
+      objectStore.removeAcl(ozoneObj, acl);
+    }
+  }
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   */
+  private void startCluster() throws Exception {
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    String omId = UUID.randomUUID().toString();
+
+    // Use native impl here, default impl doesn't do actual checks
+    conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
+    // Note: OM doesn't support live config reloading
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+
+    TestOMRequestUtils.configureFSOptimizedPaths(conf, true,
+        OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+
+    cluster =
+        MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId)
+            .setScmId(scmId).setOmId(omId).build();
+    cluster.waitForClusterToBeReady();
+
+  }
+
+  @After
+  public void stopCluster() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  private void createVolumeWithOwnerAndAcl(ObjectStore objectStore,
+      String volumeName, String ownerName, String aclString)
+      throws IOException {
+    ClientProtocol proxy = objectStore.getClientProxy();
+    objectStore.createVolume(volumeName);
+    proxy.setVolumeOwner(volumeName, ownerName);
+    setVolumeAcl(objectStore, volumeName, aclString);
+  }
+
+  /**
+   * Helper function to set volume ACL.
+   */
+  private void setVolumeAcl(ObjectStore objectStore, String volumeName,
+      String aclString) throws IOException {
+    OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName)
+        .setResType(OzoneObj.ResourceType.VOLUME).setStoreType(OZONE).build();
+    Assert.assertTrue(objectStore.setAcl(obj, OzoneAcl.parseAcls(aclString)));
+  }
+
+  /**
+   * Helper function to set bucket ACL.
+   */
+  private void setBucketAcl(ObjectStore objectStore, String volumeName,
+      String bucket, String aclString) throws IOException {
+    OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName)
+        .setBucketName(bucket).setResType(OzoneObj.ResourceType.BUCKET)
+        .setStoreType(OZONE).build();
+    Assert.assertTrue(objectStore.setAcl(obj, OzoneAcl.parseAcls(aclString)));
+  }
+
+  /**
+   * Helper function to set key ACL.
+   */
+  private void setKeyAcl(ObjectStore objectStore, String volumeName,
+      String bucket, String key, String aclString) throws IOException {
+    OzoneObj obj = OzoneObjInfo.Builder.newBuilder().setVolumeName(volumeName)
+        .setBucketName(bucket).setKeyName(key)
+        .setResType(OzoneObj.ResourceType.KEY).setStoreType(OZONE).build();
+    Assert.assertTrue(objectStore.setAcl(obj, OzoneAcl.parseAcls(aclString)));
+  }
+
+  private void createKeys(ObjectStore objectStore, OzoneBucket ozoneBucket,
+      List<String> keys) throws Exception {
+    int length = 10;
+    String aclWorldAll = "world::a";
+    byte[] input = new byte[length];
+    Arrays.fill(input, (byte) 96);
+    for (String key : keys) {
+      createKey(ozoneBucket, key, 10, input);
+      setKeyAcl(objectStore, ozoneBucket.getVolumeName(), ozoneBucket.getName(),
+          key, aclWorldAll);
+    }
+  }
+
+  private void createKey(OzoneBucket ozoneBucket, String key, int length,
+      byte[] input) throws Exception {
+    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, length);
+    ozoneOutputStream.write(input);
+    ozoneOutputStream.write(input, 0, 10);
+    ozoneOutputStream.close();
+  }
+
+}
+
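
The behavior under test can be summarized as: deleting or renaming a directory requires the DELETE permission on the directory and on every sub-path beneath it. A hedged sketch of that recursive check (hasAcl and listSubPaths are hypothetical stand-ins for the actual OM authorizer and metadata iteration):

    // Hypothetical helpers; not the actual OM implementation.
    boolean canDeleteRecursively(String dir) {
      if (!hasAcl(dir, ACLType.DELETE)) {
        return false;         // denied on the directory itself
      }
      for (String subPath : listSubPaths(dir)) {
        if (!hasAcl(subPath, ACLType.DELETE)) {
          return false;       // denied on any child => PERMISSION_DENIED
        }
      }
      return true;
    }
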
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 846ed9a..650e9bb 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -95,6 +95,8 @@
   RecoverTrash = 92;
 
   RevokeS3Secret = 93;
+
+  PurgePaths = 94;
 }
 
 message OMRequest {
@@ -169,6 +171,8 @@
   optional RecoverTrashRequest              RecoverTrashRequest            = 92;
 
   optional RevokeS3SecretRequest            RevokeS3SecretRequest          = 93;
+
+  optional PurgePathsRequest                purgePathsRequest              = 94;
 }
 
 message OMResponse {
@@ -239,6 +243,7 @@
 
   optional ListTrashResponse                  listTrashResponse            = 91;
   optional RecoverTrashResponse               RecoverTrashResponse         = 92;
+  optional PurgePathsResponse                 purgePathsResponse           = 93;
 }
 
 enum Status {
@@ -325,6 +330,7 @@
 
     QUOTA_ERROR = 67;
 
+    DIRECTORY_NOT_EMPTY = 68;
 }
 
 /**
@@ -736,6 +742,9 @@
     // This will be set by leader OM in HA and update the original request.
     optional FileEncryptionInfoProto fileEncryptionInfo = 15;
     optional bool latestVersionLocation = 16;
+
+    // This will be set when the user performs a recursive directory delete.
+    optional bool recursive = 17;
 }
 
 message KeyLocation {
@@ -779,6 +788,18 @@
     repeated OzoneAclInfo acls = 13;
     optional uint64 objectID = 14;
     optional uint64 updateID = 15;
+    optional uint64 parentID = 16;
+}
+
+message DirectoryInfo {
+    required string name = 1;
+    required uint64 creationTime = 2;
+    required uint64 modificationTime = 3;
+    repeated hadoop.hdds.KeyValue metadata = 4;
+    repeated OzoneAclInfo acls = 5;
+    required uint64 objectID = 6;
+    required uint64 updateID = 7;
+    required uint64 parentID = 8;
 }
 
 message RepeatedKeyInfo {
@@ -945,6 +966,16 @@
 
 }
 
+message PurgePathsRequest {
+    repeated string deletedDirs = 1;
+    repeated KeyInfo deletedSubFiles = 2;
+    repeated KeyInfo markDeletedSubDirs = 3;
+}
+
+message PurgePathsResponse {
+
+}
+
 message DeleteOpenKeysRequest {
   repeated OpenKeyBucket openKeysPerBucket = 1;
 }
@@ -1090,6 +1121,7 @@
     repeated PartKeyInfo partKeyInfoList = 5;
     optional uint64 objectID = 6;
     optional uint64 updateID = 7;
+    optional uint64 parentID = 8;
 }
 
 message PartKeyInfo {
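
For illustration, a sketch of how the new PurgePaths command might be assembled from the generated protobuf classes (builder names follow the message definitions above; deletedDirDbKey, subFileKeyInfos and subDirKeyInfos are assumed inputs, and the remaining required OMRequest fields are elided):

    OzoneManagerProtocolProtos.PurgePathsRequest purgeRequest =
        OzoneManagerProtocolProtos.PurgePathsRequest.newBuilder()
            .addDeletedDirs(deletedDirDbKey)            // dirs fully processed
            .addAllDeletedSubFiles(subFileKeyInfos)     // moved to deleted table
            .addAllMarkDeletedSubDirs(subDirKeyInfos)   // queued for next pass
            .build();

    OzoneManagerProtocolProtos.OMRequest omRequest =
        OzoneManagerProtocolProtos.OMRequest.newBuilder()
            .setCmdType(OzoneManagerProtocolProtos.Type.PurgePaths)
            .setPurgePathsRequest(purgeRequest)
            // ... other required OMRequest fields elided ...
            .build();
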
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index e0749c7..bcbef0c 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
@@ -375,6 +376,12 @@
       String bucketName, String prefix) throws IOException;
 
   /**
+   * Gets the DirectoryTable.
+   * @return Table.
+   */
+  Table<String, OmDirectoryInfo> getDirectoryTable();
+
+  /**
    * Return table mapped to the specified table name.
    * @param tableName
    * @return Table
@@ -398,4 +405,43 @@
 
   TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
       getKeyIterator();
+
+  /**
+   * Given parent object id and path component name, return the corresponding
+   * DB 'prefixKey' key.
+   *
+   * @param parentObjectId - parent object Id
+   * @param pathComponentName   - path component name
+   * @return DB directory key as String.
+   */
+  String getOzonePathKey(long parentObjectId, String pathComponentName);
+
+  /**
+   * Returns the DB key name of an open file in the OM metadata store:
+   * the parent object id followed by the leaf file name and the client id.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param id             - client id for this open request
+   * @return DB open file key as String.
+   */
+  String getOpenFileName(long parentObjectId, String fileName, long id);
+
+  /**
+   * Returns the DB key name of a multipart upload key in OM metadata store.
+   *
+   * @param parentObjectId - parent object Id
+   * @param fileName       - file name
+   * @param uploadId       - the upload id for this key
+   * @return DB multipart key as String.
+   */
+  String getMultipartKey(long parentObjectId, String fileName, String uploadId);
+
+  /**
+   * Get Deleted Directory Table.
+   *
+   * @return Deleted Directory Table.
+   */
+  Table<String, OmKeyInfo> getDeletedDirTable();
+
 }
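
A minimal sketch of what implementations of the new key-builder methods might look like, assuming OM_KEY_PREFIX ("/") as the separator (consistent with the key shapes exercised by the tests in this patch; the real implementation lives in OmMetadataManagerImpl and is not part of this hunk):

    @Override
    public String getOzonePathKey(long parentObjectId,
        String pathComponentName) {
      // Prefix-layout path key: <parentObjectId>/<pathComponentName>
      return parentObjectId + OM_KEY_PREFIX + pathComponentName;
    }

    @Override
    public String getOpenFileName(long parentObjectId, String fileName,
        long id) {
      // Open-file key: <parentObjectId>/<fileName>/<clientId>
      return parentObjectId + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + id;
    }
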
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
new file mode 100644
index 0000000..ba592a9
--- /dev/null
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode OmDirectoryInfo as byte array.
+ */
+public class OmDirectoryInfoCodec implements Codec<OmDirectoryInfo> {
+
+  @Override
+  public byte[] toPersistedFormat(OmDirectoryInfo object) throws IOException {
+    Preconditions
+            .checkNotNull(object, "Null object can't be converted " +
+                    "to byte array.");
+    return object.getProtobuf().toByteArray();
+  }
+
+  @Override
+  public OmDirectoryInfo fromPersistedFormat(byte[] rawData)
+          throws IOException {
+    Preconditions
+            .checkNotNull(rawData,
+                    "Null byte array can't converted to real object.");
+    try {
+      return OmDirectoryInfo.getFromProtobuf(DirectoryInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalArgumentException(
+              "Can't encode the the raw data from the byte array", e);
+    }
+  }
+
+  @Override
+  public OmDirectoryInfo copyObject(OmDirectoryInfo object) {
+    return object.copyObject();
+  }
+}
+
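
A short round-trip illustration of the codec contract (dirInfo stands for an existing OmDirectoryInfo instance):

    OmDirectoryInfoCodec codec = new OmDirectoryInfoCodec();
    byte[] raw = codec.toPersistedFormat(dirInfo);      // proto bytes for RocksDB
    OmDirectoryInfo decoded = codec.fromPersistedFormat(raw);
    // decoded carries the same name/objectID/parentID as dirInfo
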
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
index 3d8ace3..de31d87 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
@@ -32,6 +32,14 @@
 
 /**
  * Codec to encode OmKeyInfo as byte array.
+ *
+ * <p>
+ * If the layout "ozone.om.metadata.layout" is PREFIX and
+ * "ozone.om.enable.filesystem.paths" is TRUE. Then, DB stores only the leaf
+ * node name into the 'keyName' field.
+ * <p>
+ * For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ * 'keyName' field stores only the leaf node name, which is 'file1'.
  */
 public class OmKeyInfoCodec implements Codec<OmKeyInfo> {
   private static final Logger LOG =
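
To make the new javadoc concrete: under PREFIX layout only the last path component is persisted in keyName. A trivial plain-string illustration (not the actual OM normalization code):

    String fullPath = "a/b/c/d/e/file1";
    String leaf = fullPath.substring(fullPath.lastIndexOf('/') + 1);
    // leaf == "file1" -> value stored in the 'keyName' field under PREFIX layout
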
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
new file mode 100644
index 0000000..a8d66cd
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hdds.utils.BackgroundService;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT;
+
+/**
+ * This is a background service to delete orphan directories and their
+ * sub-paths (sub-dirs and sub-files).
+ *
+ * <p>
+ * It periodically scans the OM metadata to get orphan dirs from the
+ * DeletedDirectoryTable and finds their sub-paths. It fetches all sub-files
+ * from the FileTable and moves them to the DeletedTable so that the OM's
+ * KeyDeletingService will clean up those files later. It fetches all
+ * sub-directories from the DirectoryTable and moves them to the
+ * DeletedDirectoryTable so that they will be visited in later iterations.
+ *
+ * <p>
+ * After moving all sub-files and sub-dirs, the parent orphan directory is
+ * deleted by this service. Traversal continues until all the leaf path
+ * components of an orphan directory have been visited.
+ */
+public class DirectoryDeletingService extends BackgroundService {
+
+  private final KeyManager keyManager;
+  private final OzoneManager ozoneManager;
+  private AtomicLong deletedDirsCount;
+  private AtomicLong deletedFilesCount;
+  private final AtomicLong runCount;
+
+  private static ClientId clientId = ClientId.randomId();
+
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // from or write to the same tables and could send deletion requests for
+  // the same key multiple times.
+  private static final int DIR_DELETING_CORE_POOL_SIZE = 1;
+
+  // Number of items(dirs/files) to be batched in an iteration.
+  private final long pathLimitPerTask;
+
+  public DirectoryDeletingService(long interval, TimeUnit unit,
+      long serviceTimeout, OzoneManager ozoneManager) {
+    super("DirectoryDeletingService", interval, unit,
+        DIR_DELETING_CORE_POOL_SIZE, serviceTimeout);
+    this.keyManager = ozoneManager.getKeyManager();
+    this.ozoneManager = ozoneManager;
+    this.deletedDirsCount = new AtomicLong(0);
+    this.deletedFilesCount = new AtomicLong(0);
+    this.runCount = new AtomicLong(0);
+    this.pathLimitPerTask = ozoneManager.getConfiguration()
+        .getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
+            OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
+  }
+
+  private boolean shouldRun() {
+    if (ozoneManager == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return ozoneManager.isLeaderReady();
+  }
+
+  private boolean isRatisEnabled() {
+    if (ozoneManager == null) {
+      return false;
+    }
+    return ozoneManager.isRatisEnabled();
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new DirectoryDeletingService.DirDeletingTask());
+    return queue;
+  }
+
+  private class DirDeletingTask implements BackgroundTask {
+
+    @Override
+    public int getPriority() {
+      return 0;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      if (shouldRun()) {
+        runCount.incrementAndGet();
+        long count = pathLimitPerTask;
+        try {
+          long startTime = Time.monotonicNow();
+          // step-1: get one pending deleted directory
+          OmKeyInfo pendingDeletedDirInfo = keyManager.getPendingDeletionDir();
+          if (pendingDeletedDirInfo != null) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Pending deleted dir name: {}",
+                  pendingDeletedDirInfo.getKeyName());
+            }
+            // step-2: get all sub-directories under the deletedDir
+            List<OmKeyInfo> dirs =
+                keyManager.getPendingDeletionSubDirs(pendingDeletedDirInfo,
+                    count);
+            count = count - dirs.size();
+            List<OmKeyInfo> deletedSubDirList = new ArrayList<>();
+            for (OmKeyInfo dirInfo : dirs) {
+              deletedSubDirList.add(dirInfo);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("deleted sub dir name: {}",
+                    dirInfo.getKeyName());
+              }
+            }
+
+            // step-3: get all sub-files under the deletedDir
+            List<OmKeyInfo> purgeDeletedFiles =
+                keyManager.getPendingDeletionSubFiles(pendingDeletedDirInfo,
+                    count);
+            count = count - purgeDeletedFiles.size();
+
+            if (LOG.isDebugEnabled()) {
+              for (OmKeyInfo fileInfo : purgeDeletedFiles) {
+                LOG.debug("deleted sub file name: {}", fileInfo.getKeyName());
+              }
+            }
+
+            // step-4: Since there is a boundary condition of 'numEntries' in
+            // each batch, check whether the sub-path count has reached the
+            // batch size limit. If it has, there may be more child paths to
+            // visit, so keep the parent deleted directory for one more pass.
+            List<String> purgeDeletedDirs = new ArrayList<>();
+            if (count > 0) {
+              // TODO: For now, there is only one entry in this list. A list
+              //  data structure is kept because this can be extended to add
+              //  more directories within the batchSize limit.
+              purgeDeletedDirs.add(pendingDeletedDirInfo.getPath());
+            }
+
+            if (isRatisEnabled()) {
+              submitPurgePaths(purgeDeletedDirs, purgeDeletedFiles,
+                  deletedSubDirList);
+            }
+            // TODO: need to handle delete with non-ratis
+
+            deletedDirsCount.addAndGet(purgeDeletedDirs.size());
+            deletedFilesCount.addAndGet(purgeDeletedFiles.size());
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Number of dirs deleted: {}, Number of files moved:" +
+                      " {} to DeletedTable, elapsed time: {}ms",
+                  deletedDirsCount, deletedFilesCount,
+                  Time.monotonicNow() - startTime);
+            }
+          }
+        } catch (IOException e) {
+          LOG.error("Error while running delete directories and files " +
+              "background task. Will retry at next run.", e);
+        }
+      }
+
+      // Placeholder: return an empty result for this callback.
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  /**
+   * Returns the number of dirs deleted by the background service.
+   *
+   * @return Long count.
+   */
+  @VisibleForTesting
+  public long getDeletedDirsCount() {
+    return deletedDirsCount.get();
+  }
+
+  /**
+   * Returns the number of files moved to DeletedTable by the background
+   * service.
+   *
+   * @return Long count.
+   */
+  @VisibleForTesting
+  public long getMovedFilesCount() {
+    return deletedFilesCount.get();
+  }
+
+  /**
+   * Returns the number of times this Background service has run.
+   *
+   * @return Long, run count.
+   */
+  @VisibleForTesting
+  public long getRunCount() {
+    return runCount.get();
+  }
+
+  private int submitPurgePaths(List<String> purgeDeletedDirs,
+      List<OmKeyInfo> purgeDeletedFiles, List<OmKeyInfo> markDirsAsDeleted) {
+    // Put all keys to be purged in a list
+    int deletedCount = 0;
+    OzoneManagerProtocolProtos.PurgePathsRequest.Builder purgePathsRequest =
+        OzoneManagerProtocolProtos.PurgePathsRequest.newBuilder();
+    for (String purgeDir : purgeDeletedDirs) {
+      purgePathsRequest.addDeletedDirs(purgeDir);
+    }
+    for (OmKeyInfo purgeFile : purgeDeletedFiles) {
+      purgePathsRequest.addDeletedSubFiles(
+          purgeFile.getProtobuf(true, CURRENT_VERSION));
+    }
+
+    // Add these directories to deletedDirTable, so that its sub-paths will be
+    // traversed in next iteration to ensure cleanup all sub-children.
+    for (OmKeyInfo dir : markDirsAsDeleted) {
+      purgePathsRequest.addMarkDeletedSubDirs(dir.getProtobuf(CURRENT_VERSION));
+    }
+
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        OzoneManagerProtocolProtos.OMRequest.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.PurgePaths)
+            .setPurgePathsRequest(purgePathsRequest)
+            .setClientId(clientId.toString())
+            .build();
+
+    // Submit Purge paths request to OM
+    try {
+      RaftClientRequest raftClientRequest =
+          createRaftClientRequestForDelete(omRequest);
+      ozoneManager.getOmRatisServer().submitRequest(omRequest,
+          raftClientRequest);
+    } catch (ServiceException e) {
+      LOG.error("PurgePaths request failed. Will retry at next run.");
+      return 0;
+    }
+    return deletedCount;
+  }
+
+  private RaftClientRequest createRaftClientRequestForDelete(
+      OzoneManagerProtocolProtos.OMRequest omRequest) {
+    return RaftClientRequest.newBuilder()
+        .setClientId(clientId)
+        .setServerId(ozoneManager.getOmRatisServer().getRaftPeerId())
+        .setGroupId(ozoneManager.getOmRatisServer().getRaftGroupId())
+        .setCallId(runCount.get())
+        .setMessage(
+            Message.valueOf(
+                OMRatisHelper.convertRequestToByteString(omRequest)))
+        .setType(RaftClientRequest.writeRequestType())
+        .build();
+  }
+
+}
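
Both knobs that drive this service come from the new properties in ozone-default.xml; a minimal sketch of setting them programmatically (the values are illustrative, not recommendations):

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;

OzoneConfiguration conf = new OzoneConfiguration();
// Run the directory deleting service every 30s instead of the 1m default.
conf.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 30, TimeUnit.SECONDS);
// Budget 5000 sub-paths (dirs + files) per pass instead of the 10000 default.
conf.setInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, 5000);
```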
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
index 658f503..b569b5d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -273,4 +273,60 @@
    * @param key
    */
   void refresh(OmKeyInfo key) throws IOException;
+
+  /**
+   * Returns one pending deletion directory entry. Assume the OM has an FS
+   * namespace like the one below; the deletedDirTable stores the absolute
+   * path name because the existing KeyDeletingService expects the full key
+   * name. For example, if the user deletes directory 'd3', the entry in the
+   * OM DB looks like: DBKey = 1030/d3 and DBValue = KeyInfo with keyName
+   * "a/b2/d3".
+   *
+   *                   vol1
+   *                    |
+   *                  buck-1
+   *                    |
+   *                    a
+   *                    |
+   *      -----------------------------------
+   *     |             |                     |
+   *     b1            b2                    b3
+   *   -----       ---------               ----------
+   *   |    |      |    |   |             |    |     |
+   *  c1   c2     d1   d2  d3             e1   e2   e3
+   *                   |                  |
+   *               --------               |
+   *              |        |              |
+   *           d21.txt   d22.txt        e11.txt
+   *
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  OmKeyInfo getPendingDeletionDir() throws IOException;
+
+  /**
+   * Returns up to numEntries sub-directories under the given parent
+   * directory.
+   *
+   * @param parentInfo
+   * @param numEntries
+   * @return list of dirs
+   * @throws IOException
+   */
+  List<OmKeyInfo> getPendingDeletionSubDirs(OmKeyInfo parentInfo,
+      long numEntries) throws IOException;
+
+  /**
+   * Returns up to numEntries sub-files under the given parent directory.
+   *
+   * @param parentInfo
+   * @param numEntries
+   * @return list of files
+   * @throws IOException
+   */
+  List<OmKeyInfo> getPendingDeletionSubFiles(OmKeyInfo parentInfo,
+      long numEntries) throws IOException;
+
+  /**
+   * Returns the instance of Directory Deleting Service.
+   * @return Background service.
+   */
+  BackgroundService getDirDeletingService();
 }
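
Taken together, these additions let a caller drain one orphan directory level by level. A hedged sketch of a single pass, simplified from what DirectoryDeletingService actually batches and submits over Ratis; `keyManager` is an assumed fixture:

```java
// Hedged sketch: one pass of the purge loop, error handling omitted.
long budget = 10000;                     // ozone.path.deleting.limit.per.task
OmKeyInfo dir = keyManager.getPendingDeletionDir();
if (dir != null) {
  // Sub-dirs get re-queued into the deletedDirTable for later passes.
  List<OmKeyInfo> subDirs = keyManager.getPendingDeletionSubDirs(dir, budget);
  budget -= subDirs.size();
  // Sub-files are handed to KeyDeletingService via the deletedTable.
  List<OmKeyInfo> subFiles = keyManager.getPendingDeletionSubFiles(dir, budget);
  budget -= subFiles.size();
  // Only when the budget was not exhausted is 'dir' itself safe to purge.
  boolean purgeParentNow = budget > 0;
}
```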
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 6c28ab0..bcb0ccf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -34,6 +34,7 @@
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
+import java.util.Stack;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -76,6 +77,7 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -95,7 +97,9 @@
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -129,7 +133,10 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
@@ -172,6 +179,7 @@
   private final PrefixManager prefixManager;
 
   private final boolean enableFileSystemPaths;
+  private BackgroundService dirDeletingService;
 
 
   @VisibleForTesting
@@ -246,6 +254,22 @@
           serviceTimeout, configuration);
       keyDeletingService.start();
     }
+
+    // Start directory deletion service for FSO buckets.
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()
+        && dirDeletingService == null) {
+      long dirDeleteInterval = configuration.getTimeDuration(
+          OZONE_DIR_DELETING_SERVICE_INTERVAL,
+          OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      long serviceTimeout = configuration.getTimeDuration(
+          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
+          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      dirDeletingService = new DirectoryDeletingService(dirDeleteInterval,
+          TimeUnit.MILLISECONDS, serviceTimeout, ozoneManager);
+      dirDeletingService.start();
+    }
   }
 
   KeyProviderCryptoExtension getKMSProvider() {
@@ -258,6 +282,10 @@
       keyDeletingService.shutdown();
       keyDeletingService = null;
     }
+    if (dirDeletingService != null) {
+      dirDeletingService.shutdown();
+      dirDeletingService = null;
+    }
   }
 
   private OmBucketInfo getBucketInfo(String volumeName, String bucketName)
@@ -266,28 +294,6 @@
     return metadataManager.getBucketTable().get(bucketKey);
   }
 
-  private void validateBucket(String volumeName, String bucketName)
-      throws IOException {
-    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-    // Check if bucket exists
-    if (metadataManager.getBucketTable().get(bucketKey) == null) {
-      String volumeKey = metadataManager.getVolumeKey(volumeName);
-      // If the volume also does not exist, we should throw volume not found
-      // exception
-      if (metadataManager.getVolumeTable().get(volumeKey) == null) {
-        LOG.error("volume not found: {}", volumeName);
-        throw new OMException("Volume not found",
-            VOLUME_NOT_FOUND);
-      }
-
-      // if the volume exists but bucket does not exist, throw bucket not found
-      // exception
-      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
-      throw new OMException("Bucket not found",
-          BUCKET_NOT_FOUND);
-    }
-  }
-
   /**
    * Check S3 bucket exists or not.
    * @param volumeName
@@ -318,7 +324,7 @@
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    validateBucket(volumeName, bucketName);
+    OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
     String openKey = metadataManager.getOpenKey(
         volumeName, bucketName, keyName, clientID);
 
@@ -433,7 +439,7 @@
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    validateBucket(volumeName, bucketName);
+    OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
 
     long currentTime = UniqueId.next();
     OmKeyInfo keyInfo;
@@ -601,7 +607,7 @@
     try {
       metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
           bucketName);
-      validateBucket(volumeName, bucketName);
+      OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
       OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey);
       if (keyInfo == null) {
         throw new OMException("Failed to commit key, as " + openKey + "entry " +
@@ -643,9 +649,11 @@
         bucketName);
     OmKeyInfo value = null;
     try {
-      String keyBytes = metadataManager.getOzoneKey(
-          volumeName, bucketName, keyName);
-      value = metadataManager.getKeyTable().get(keyBytes);
+      if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+        value = getOmKeyInfoFSO(volumeName, bucketName, keyName);
+      } else {
+        value = getOmKeyInfo(volumeName, bucketName, keyName);
+      }
     } catch (IOException ex) {
       if (ex instanceof OMException) {
         throw ex;
@@ -665,7 +673,7 @@
         LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName,
                 bucketName, keyName);
       }
-      throw new OMException("Key not found", KEY_NOT_FOUND);
+      throw new OMException("Key:" + keyName + " not found", KEY_NOT_FOUND);
     }
 
     if (args.getLatestVersionLocation()) {
@@ -686,6 +694,34 @@
     return value;
   }
 
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    String keyBytes = metadataManager.getOzoneKey(
+            volumeName, bucketName, keyName);
+    return metadataManager.getKeyTable().get(keyBytes);
+  }
+
+  /**
+   * Lookup returns only closed fileInfo. This returns null if the key does
+   * not exist or is still open for writing; for a directory, the returned
+   * keyName carries a trailing slash.
+   */
+  private OmKeyInfo getOmKeyInfoFSO(String volumeName, String bucketName,
+                                   String keyName) throws IOException {
+    OzoneFileStatus fileStatus =
+            OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+                    volumeName, bucketName, keyName, scmBlockSize);
+    if (fileStatus == null) {
+      return null;
+    }
+    // Append a trailing slash to represent the directory to the user.
+    if (fileStatus.isDirectory()) {
+      String keyPath = OzoneFSUtils.addTrailingSlashIfNeeded(
+          fileStatus.getKeyInfo().getKeyName());
+      fileStatus.getKeyInfo().setKeyName(keyPath);
+    }
+    return fileStatus.getKeyInfo();
+  }
+
   private void addBlockToken4Read(OmKeyInfo value) throws IOException {
     Preconditions.checkNotNull(value, "OMKeyInfo cannot be null");
     if (grpcBlockTokenEnabled) {
@@ -966,6 +1002,11 @@
   }
 
   @Override
+  public BackgroundService getDirDeletingService() {
+    return dirDeletingService;
+  }
+
+  @Override
   public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws
       IOException {
     Preconditions.checkNotNull(omKeyArgs);
@@ -1404,8 +1445,10 @@
           // than part number marker
           if (partKeyInfoEntry.getKey() > partNumberMarker) {
             PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
+            String partName = getPartName(partKeyInfo, volumeName, bucketName,
+                keyName);
             OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(),
-                partKeyInfo.getPartName(),
+                partName,
                 partKeyInfo.getPartKeyInfo().getModificationTime(),
                 partKeyInfo.getPartKeyInfo().getDataSize());
             omPartInfoList.add(omPartInfo);
@@ -1420,7 +1463,11 @@
 
         if (replicationConfig == null) {
           //if there are no parts, use the replicationType from the open key.
-
+          if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+            multipartKey =
+                getMultipartOpenKeyFSO(volumeName, bucketName, keyName,
+                    uploadID);
+          }
           OmKeyInfo omKeyInfo =
               metadataManager.getOpenKeyTable().get(multipartKey);
 
@@ -1463,6 +1510,48 @@
     }
   }
 
+  private String getPartName(PartKeyInfo partKeyInfo, String volName,
+                             String buckName, String keyName) {
+
+    String partName = partKeyInfo.getPartName();
+
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      String parentDir = OzoneFSUtils.getParentDir(keyName);
+      String partFileName = OzoneFSUtils.getFileName(partKeyInfo.getPartName());
+
+      StringBuilder fullKeyPartName = new StringBuilder();
+      fullKeyPartName.append(OZONE_URI_DELIMITER);
+      fullKeyPartName.append(volName);
+      fullKeyPartName.append(OZONE_URI_DELIMITER);
+      fullKeyPartName.append(buckName);
+      if (StringUtils.isNotEmpty(parentDir)) {
+        fullKeyPartName.append(OZONE_URI_DELIMITER);
+        fullKeyPartName.append(parentDir);
+      }
+      fullKeyPartName.append(OZONE_URI_DELIMITER);
+      fullKeyPartName.append(partFileName);
+
+      return fullKeyPartName.toString();
+    }
+    return partName;
+  }
+
+  private String getMultipartOpenKeyFSO(String volumeName, String bucketName,
+      String keyName, String uploadID) throws IOException {
+    OMMetadataManager metaMgr = ozoneManager.getMetadataManager();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo = metaMgr.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    long parentID =
+        OMFileRequest.getParentID(bucketId, pathComponents, keyName, metaMgr);
+
+    String multipartKey = metaMgr.getMultipartKey(parentID, fileName, uploadID);
+
+    return multipartKey;
+  }
+
   /**
    * Add acl for Ozone object. Return true if acl is added successfully else
    * false.
@@ -1482,7 +1571,7 @@
 
     metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
       if (keyInfo == null) {
@@ -1526,7 +1615,7 @@
 
     metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
       if (keyInfo == null) {
@@ -1567,7 +1656,7 @@
 
     metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
       OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
       if (keyInfo == null) {
@@ -1603,12 +1692,16 @@
     String volume = obj.getVolumeName();
     String bucket = obj.getBucketName();
     String keyName = obj.getKeyName();
-
+    OmKeyInfo keyInfo;
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
-      OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
+      if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+        keyInfo = getOmKeyInfoFSO(volume, bucket, keyName);
+      } else {
+        keyInfo = getOmKeyInfo(volume, bucket, keyName);
+      }
       if (keyInfo == null) {
         throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND);
       }
@@ -1651,7 +1744,7 @@
 
     metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
     try {
-      validateBucket(volume, bucket);
+      OMFileRequest.validateBucket(metadataManager, volume, bucket);
       OmKeyInfo keyInfo;
 
       // For Acl Type "WRITE", the key can only be found in
@@ -1659,6 +1752,12 @@
       if (context.getAclRights() == IAccessAuthorizer.ACLType.WRITE) {
         keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
       } else {
+        // Recursive check is done only for ACL_TYPE DELETE
+        // Rename and delete operations will send ACL_TYPE DELETE
+        if (context.isRecursiveAccessCheck()
+            && context.getAclRights() == IAccessAuthorizer.ACLType.DELETE) {
+          return checkChildrenAcls(ozObject, context);
+        }
         try {
           OzoneFileStatus fileStatus = getFileStatus(args);
           keyInfo = fileStatus.getKeyInfo();
@@ -1704,6 +1803,52 @@
   }
 
   /**
+   * Check ACLs for all sub-paths of a directory.
+   *
+   * @param ozObject
+   * @param context
+   * @return
+   * @throws IOException
+   */
+  private boolean checkChildrenAcls(OzoneObj ozObject, RequestContext context)
+      throws IOException {
+    OmKeyInfo keyInfo;
+    OzoneFileStatus ozoneFileStatus =
+        ozObject.getOzonePrefixPathViewer().getOzoneFileStatus();
+    keyInfo = ozoneFileStatus.getKeyInfo();
+    // Use a stack to check ACLs for sub-paths.
+    Stack<OzoneFileStatus> directories = new Stack<>();
+    // Check whether the given file/dir itself is accessible.
+    boolean hasAccess = OzoneAclUtil.checkAclRight(keyInfo.getAcls(), context);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("user:{} has access rights for key:{} :{} ",
+          context.getClientUgi(), ozObject.getKeyName(), hasAccess);
+    }
+    if (ozoneFileStatus.isDirectory() && hasAccess) {
+      directories.add(ozoneFileStatus);
+    }
+    while (!directories.isEmpty() && hasAccess) {
+      ozoneFileStatus = directories.pop();
+      String keyPath = ozoneFileStatus.getTrimmedName();
+      Iterator<? extends OzoneFileStatus> children =
+          ozObject.getOzonePrefixPathViewer().getChildren(keyPath);
+      while (hasAccess && children.hasNext()) {
+        ozoneFileStatus = children.next();
+        keyInfo = ozoneFileStatus.getKeyInfo();
+        hasAccess = OzoneAclUtil.checkAclRight(keyInfo.getAcls(), context);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("user:{} has access rights for key:{} :{} ",
+              context.getClientUgi(), keyInfo.getKeyName(), hasAccess);
+        }
+        if (hasAccess && ozoneFileStatus.isDirectory()) {
+          directories.add(ozoneFileStatus);
+        }
+      }
+    }
+    return hasAccess;
+  }
+
+  /**
    * Helper method to validate ozone object.
    * @param obj
    * */
@@ -1765,6 +1910,11 @@
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
 
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      return getOzoneFileStatusFSO(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress,
+              args.getLatestVersionLocation(), false);
+    }
     return getOzoneFileStatus(volumeName, bucketName, keyName,
         args.getRefreshPipeline(), args.getSortDatanodes(),
         args.getLatestVersionLocation(), clientAddress);
@@ -1784,7 +1934,7 @@
     try {
       // Check if this is the root of the filesystem.
       if (keyName.length() == 0) {
-        validateBucket(volumeName, bucketName);
+        OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
         return new OzoneFileStatus();
       }
 
@@ -1834,6 +1984,67 @@
             FILE_NOT_FOUND);
   }
 
+
+  private OzoneFileStatus getOzoneFileStatusFSO(String volumeName,
+      String bucketName, String keyName, boolean sortDatanodes,
+      String clientAddress, boolean latestLocationVersion,
+      boolean skipFileNotFoundError) throws IOException {
+    OzoneFileStatus fileStatus = null;
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      // Check if this is the root of the filesystem.
+      if (keyName.length() == 0) {
+        OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
+        return new OzoneFileStatus();
+      }
+
+      fileStatus = OMFileRequest.getOMKeyInfoIfExists(metadataManager,
+              volumeName, bucketName, keyName, scmBlockSize);
+
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+
+    if (fileStatus != null) {
+      // if the key is a file then do refresh pipeline info in OM by asking SCM
+      if (fileStatus.isFile()) {
+        OmKeyInfo fileKeyInfo = fileStatus.getKeyInfo();
+        if (latestLocationVersion) {
+          slimLocationVersion(fileKeyInfo);
+        }
+        // refreshPipeline flag check has been removed as part of
+        // https://issues.apache.org/jira/browse/HDDS-3658.
+        // Please refer this jira for more details.
+        refresh(fileKeyInfo);
+
+        if (sortDatanodes) {
+          sortDatanodes(clientAddress, fileKeyInfo);
+        }
+        return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false);
+      } else {
+        return fileStatus;
+      }
+    }
+
+    // Key not found.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Unable to get file status for the key: volume: {}, bucket:" +
+                      " {}, key: {}, with error: No such file exists.",
+              volumeName, bucketName, keyName);
+    }
+
+    // don't throw exception if this flag is true.
+    if (skipFileNotFoundError) {
+      return fileStatus;
+    }
+
+    throw new OMException("Unable to get file status: volume: " +
+            volumeName + " bucket: " + bucketName + " key: " + keyName,
+            FILE_NOT_FOUND);
+  }
+
   /**
    * Ozone FS api to create a directory. Parent directories if do not exist
    * are created for the input directory.
@@ -1975,10 +2186,17 @@
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
-    OzoneFileStatus fileStatus = getOzoneFileStatus(volumeName, bucketName,
-        keyName, args.getRefreshPipeline(), args.getSortDatanodes(),
-        args.getLatestVersionLocation(), clientAddress);
-      //if key is not of type file or if key is not found we throw an exception
+    OzoneFileStatus fileStatus;
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      fileStatus = getOzoneFileStatusFSO(volumeName, bucketName, keyName,
+              args.getSortDatanodes(), clientAddress,
+              args.getLatestVersionLocation(), false);
+    } else {
+      fileStatus = getOzoneFileStatus(volumeName, bucketName,
+              keyName, args.getRefreshPipeline(), args.getSortDatanodes(),
+              args.getLatestVersionLocation(), clientAddress);
+    }
+    // If the key is not a file or is not found, we throw an exception.
     if (fileStatus.isFile()) {
       // add block token for read.
       addBlockToken4Read(fileStatus.getKeyInfo());
@@ -2078,6 +2296,11 @@
       return fileStatusList;
     }
 
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      return listStatusFSO(args, recursive, startKey, numEntries,
+          clientAddress);
+    }
+
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
     String keyName = args.getKeyName();
@@ -2216,6 +2439,392 @@
     return fileStatusList;
   }
 
+  @SuppressWarnings("methodlength")
+  public List<OzoneFileStatus> listStatusFSO(OmKeyArgs args, boolean recursive,
+      String startKey, long numEntries, String clientAddress)
+          throws IOException {
+    Preconditions.checkNotNull(args, "Key args can not be null");
+
+    // Unsorted OMKeyInfo list combining results from the TableCache and DB.
+    List<OzoneFileStatus> fileStatusFinalList = new ArrayList<>();
+
+    if (numEntries <= 0) {
+      return fileStatusFinalList;
+    }
+
+    /**
+     * A map sorted by OmKey to combine results from TableCache and DB for
+     * each entity - Dir & File.
+     *
+     * Two separate maps are required because the order of seek -> (1)Seek
+     * files in fileTable (2)Seek dirs in dirTable.
+     *
+     * StartKey should be added to the final listStatuses, so if we combined
+     * files and dirs into a single map, a directory with lower precedence
+     * would appear at the top of the list even when the startKey is given as
+     * a fileName.
+     *
+     * For example, startKey="a/file1". As per the seek order, all the files
+     * are fetched first and then the directories are seeked. Assume a
+     * directory named "a/b" exists. With one map, the sorted list would be
+     * ["a/b", "a/file1"]. But the expected list is ["a/file1", "a/b"]: the
+     * startKey element should always be at the top of the listStatuses.
+     */
+    TreeMap<String, OzoneFileStatus> cacheFileMap = new TreeMap<>();
+    TreeMap<String, OzoneFileStatus> cacheDirMap = new TreeMap<>();
+
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    String seekFileInDB;
+    String seekDirInDB;
+    long prefixKeyInDB;
+    String prefixPath = keyName;
+    int countEntries = 0;
+
+    // TODO: recursive flag=true will be handled in HDDS-4360 jira.
+    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            bucketName);
+    try {
+      if (Strings.isNullOrEmpty(startKey)) {
+        OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
+        if (fileStatus.isFile()) {
+          return Collections.singletonList(fileStatus);
+        }
+
+        // Searching the DeletedTable is not required because all deleted
+        // keys are marked directly in the dirTable or the keyTable by
+        // breaking the pointer to their sub-dirs and sub-files, so there is
+        // no inconsistency issue.
+
+        /*
+         * keyName is a directory.
+         * Say, "/a" is the dir name and its objectID is 1024, then seek
+         * will be doing with "1024/" to get all immediate descendants.
+         */
+        if (fileStatus.getKeyInfo() != null) {
+          prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
+        } else {
+          // list root directory.
+          String bucketKey = metadataManager.getBucketKey(volumeName,
+                  bucketName);
+          OmBucketInfo omBucketInfo =
+                  metadataManager.getBucketTable().get(bucketKey);
+          prefixKeyInDB = omBucketInfo.getObjectID();
+        }
+        seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+        seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+        // Order of seek -> (1)Seek files in fileTable (2)Seek dirs in dirTable
+        // 1. Seek the given key in key table.
+        countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
+                prefixPath, prefixKeyInDB, startKey, countEntries, numEntries);
+        // 2. Seek the given key in dir table.
+        getDirectories(cacheDirMap, seekDirInDB, prefixPath, prefixKeyInDB,
+                startKey, countEntries, numEntries, volumeName, bucketName,
+                recursive);
+      } else {
+        /*
+         * startKey will be used in iterator seek and sets the beginning point
+         * for key traversal.
+         * keyName will be used as parentID where the user has requested to
+         * list the keys from.
+         *
+         * When recursive flag=false, parentID won't change between two pages.
+         * For example: OM has a namespace like,
+         *    /a/1...1M files and /a/b/1...1M files.
+         *    /a/1...1M directories and /a/b/1...1M directories.
+         * Listing "/a", will always have the parentID as "a" irrespective of
+         * the startKey value.
+         */
+
+        // Check startKey is an immediate child of keyName. For example,
+        // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
+        if (StringUtils.isNotBlank(keyName) &&
+                !OzoneFSUtils.isImmediateChild(keyName, startKey)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
+                    "Returns empty list", startKey, keyName);
+          }
+          return Collections.emptyList();
+        }
+
+        // If prefixPath is empty, derive it from the startKey's parent dir.
+        if (StringUtils.isBlank(prefixPath)) {
+          prefixPath = OzoneFSUtils.getParentDir(startKey);
+        }
+
+        OzoneFileStatus fileStatusInfo = getOzoneFileStatusFSO(volumeName,
+                bucketName, startKey, false, null,
+                args.getLatestVersionLocation(), true);
+
+        if (fileStatusInfo != null) {
+          prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
+          if (fileStatusInfo.isDirectory()) {
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+
+            // Order of seek -> (1) Seek dirs only in dirTable. In OM, the
+            // order of search is always fileTable first and then dirTable,
+            // so it is not required to search the fileTable again.
+
+            // Seek the given key in dirTable.
+            getDirectories(cacheDirMap, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          } else {
+            seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
+                    fileStatusInfo.getKeyInfo().getFileName());
+            // begins from the first sub-dir under the parent dir
+            seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
+
+            // 1. Seek the given key in key table.
+            countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
+                    prefixPath, prefixKeyInDB, startKey, countEntries,
+                    numEntries);
+            // 2. Seek the given key in dir table.
+            getDirectories(cacheDirMap, seekDirInDB, prefixPath,
+                    prefixKeyInDB, startKey, countEntries, numEntries,
+                    volumeName, bucketName, recursive);
+          }
+        } else {
+          // TODO: HDDS-4364: startKey can be a non-existent key
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is a non-existent key; returning empty " +
+                    "list", startKey);
+          }
+          return Collections.emptyList();
+        }
+      }
+    } finally {
+      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+              bucketName);
+    }
+
+    List<OmKeyInfo> keyInfoList = new ArrayList<>();
+    for (OzoneFileStatus fileStatus : cacheFileMap.values()) {
+      fileStatusFinalList.add(fileStatus);
+      keyInfoList.add(fileStatus.getKeyInfo());
+    }
+    for (OzoneFileStatus fileStatus : cacheDirMap.values()) {
+      fileStatusFinalList.add(fileStatus);
+    }
+    if (args.getLatestVersionLocation()) {
+      slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0]));
+    }
+    // refreshPipeline flag check has been removed as part of
+    // https://issues.apache.org/jira/browse/HDDS-3658.
+    // Please refer this jira for more details.
+    refreshPipeline(keyInfoList);
+    if (args.getSortDatanodes()) {
+      sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
+    }
+    return fileStatusFinalList;
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected int getDirectories(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
+      String seekDirInDB, String prefixPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries, String volumeName,
+      String bucketName, boolean recursive) throws IOException {
+
+    // A set to keep track of keys deleted in cache but not flushed to DB.
+    Set<String> deletedKeySet = new TreeSet<>();
+
+    Table dirTable = metadataManager.getDirectoryTable();
+    countEntries = listStatusFindDirsInTableCache(cacheKeyMap, dirTable,
+            prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
+            bucketName, countEntries, numEntries, deletedKeySet);
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (deletedKeySet.contains(dirInfo.getPath())) {
+        iterator.next(); // move to next entry in the table
+        // entry is actually deleted in cache but can still exist in the DB
+        continue;
+      }
+      if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
+              prefixKeyInDB)) {
+        break;
+      }
+
+      // TODO: recursive list will be handled in HDDS-4360 jira.
+      if (!recursive) {
+        String dirName = OMFileRequest.getAbsolutePath(prefixPath,
+                dirInfo.getName());
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+                bucketName, dirInfo, dirName);
+        cacheKeyMap.put(dirName, new OzoneFileStatus(omKeyInfo, scmBlockSize,
+                true));
+        countEntries++;
+      }
+      // move to next entry in the DirTable
+      iterator.next();
+    }
+
+    return countEntries;
+  }
+
+  private int getFilesFromDirectory(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
+      String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB,
+      String startKey, int countEntries, long numEntries) throws IOException {
+
+    // A set to keep track of keys deleted in cache but not flushed to DB.
+    Set<String> deletedKeySet = new TreeSet<>();
+
+    Table<String, OmKeyInfo> keyTable = metadataManager.getKeyTable();
+    countEntries = listStatusFindFilesInTableCache(cacheKeyMap, keyTable,
+            prefixKeyInDB, seekKeyInDB, prefixKeyPath, startKey,
+            countEntries, numEntries, deletedKeySet);
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            iterator = keyTable.iterator();
+    iterator.seek(seekKeyInDB);
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmKeyInfo keyInfo = iterator.value().getValue();
+      if (deletedKeySet.contains(keyInfo.getPath())) {
+        iterator.next(); // move to next entry in the table
+        // entry is actually deleted in cache but can still exist in the DB
+        continue;
+      }
+      if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(),
+              prefixKeyInDB)) {
+        break;
+      }
+
+      keyInfo.setFileName(keyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              keyInfo.getKeyName());
+      keyInfo.setKeyName(fullKeyPath);
+      cacheKeyMap.put(fullKeyPath,
+              new OzoneFileStatus(keyInfo, scmBlockSize, false));
+      countEntries++;
+      iterator.next(); // move to next entry in the table
+    }
+    return countEntries;
+  }
+
+  /**
+   * Helper function for listStatus to find key in FileTableCache.
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindFilesInTableCache(
+          TreeMap<String, OzoneFileStatus> cacheKeyMap, Table<String,
+          OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, int countEntries,
+          long numEntries, Set<String> deletedKeySet) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = keyTable.cacheIterator();
+
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
+      // cacheOmKeyInfo is null if an entry is deleted in cache
+      if (cacheOmKeyInfo == null) {
+        deletedKeySet.add(cacheKey);
+        continue;
+      }
+
+      // Make a local copy of the OmKeyInfo to reset its keyName to the
+      // full key path. In the DB, keyName stores only the leaf node name,
+      // but the list returned to the user should have the full path.
+      OmKeyInfo omKeyInfo = cacheOmKeyInfo.copyObject();
+
+      omKeyInfo.setFileName(omKeyInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              omKeyInfo.getKeyName());
+      omKeyInfo.setKeyName(fullKeyPath);
+
+      countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, omKeyInfo,
+              false);
+    }
+    return countEntries;
+  }
+
+  /**
+   * Helper function for listStatus to find key in DirTableCache.
+   */
+  @SuppressWarnings("parameternumber")
+  private int listStatusFindDirsInTableCache(
+          TreeMap<String, OzoneFileStatus> cacheKeyMap, Table<String,
+          OmDirectoryInfo> dirTable, long prefixKeyInDB, String seekKeyInDB,
+          String prefixKeyPath, String startKey, String volumeName,
+          String bucketName, int countEntries, long numEntries,
+          Set<String> deletedKeySet) {
+
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIter = dirTable.cacheIterator();
+    // seekKeyInDB will have two types of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    // TODO: recursive list will be handled in HDDS-4360 jira.
+    while (cacheIter.hasNext() && numEntries - countEntries > 0) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> entry =
+              cacheIter.next();
+      String cacheKey = entry.getKey().getCacheKey();
+      OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      // cacheOmDirInfo is null if an entry is deleted in cache
+      if (cacheOmDirInfo == null) {
+        deletedKeySet.add(cacheKey);
+        continue;
+      }
+      String fullDirPath = OMFileRequest.getAbsolutePath(prefixKeyPath,
+              cacheOmDirInfo.getName());
+      OmKeyInfo cacheDirKeyInfo = OMFileRequest.getOmKeyInfo(volumeName,
+              bucketName, cacheOmDirInfo, fullDirPath);
+
+      countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB,
+              seekKeyInDB, startKey, countEntries, cacheKey, cacheDirKeyInfo,
+              true);
+    }
+    return countEntries;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private int addKeyInfoToFileStatusList(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap,
+      long prefixKeyInDB, String seekKeyInDB, String startKey,
+      int countEntries, String cacheKey, OmKeyInfo cacheOmKeyInfo,
+      boolean isDirectory) {
+    // seekKeyInDB will have two types of values.
+    // 1. "1024/"   -> startKey is null or empty
+    // 2. "1024/b"  -> startKey exists
+    if (StringUtils.isBlank(startKey)) {
+      // startKey is null or empty, then the seekKeyInDB="1024/"
+      if (cacheKey.startsWith(seekKeyInDB)) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        cacheKeyMap.put(cacheOmKeyInfo.getKeyName(), fileStatus);
+        countEntries++;
+      }
+    } else {
+      // startKey is not empty, so seekKeyInDB="1024/b" and
+      // seekKeyInDBWithOnlyParentID="1024/". The latter avoids matching
+      // cache entries of a different parentID such as "102444".
+      // All the keys after "1024/b" have to be listed, which requires a
+      // >= 0 string comparison.
+      String seekKeyInDBWithOnlyParentID = prefixKeyInDB + OM_KEY_PREFIX;
+      if (cacheKey.startsWith(seekKeyInDBWithOnlyParentID) &&
+              cacheKey.compareTo(seekKeyInDB) >= 0) {
+        OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo,
+                scmBlockSize, isDirectory);
+        cacheKeyMap.put(cacheOmKeyInfo.getKeyName(), fileStatus);
+        countEntries++;
+      }
+    }
+    return countEntries;
+  }
+
   private String getNextGreaterString(String volumeName, String bucketName,
       String keyPrefix) throws IOException {
     // Increment the last character of the string and return the new ozone key.
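
The two seek-key shapes handled by addKeyInfoToFileStatusList can be pinned down with a tiny standalone example (hedged; plain String logic only, no OM types):

```java
// Hedged sketch of the cache-key matching rules used above.
long parentId = 1024L;
String seekKeyInDB = parentId + "/b";   // startKey present  -> "1024/b"
String onlyParent = parentId + "/";     // guard prefix      -> "1024/"

// "102444/x" shares the leading digits but is another parent: rejected.
assert !"102444/x".startsWith(onlyParent);
// "1024/a" sorts before the startKey entry: rejected by compareTo.
assert "1024/a".startsWith(onlyParent)
    && "1024/a".compareTo(seekKeyInDB) < 0;
// "1024/b" and everything after it are accepted (>= 0 comparison).
assert "1024/c".startsWith(onlyParent)
    && "1024/c".compareTo(seekKeyInDB) >= 0;
```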
@@ -2368,7 +2977,6 @@
     }
     return nodeSet;
   }
-
   private void slimLocationVersion(OmKeyInfo... keyInfos) {
     if (keyInfos != null) {
       for (OmKeyInfo keyInfo : keyInfos) {
@@ -2382,8 +2990,92 @@
           continue;
         }
         keyInfo.setKeyLocationVersions(keyInfo.getKeyLocationVersions()
-            .subList(keyLocationVersionLength-1, keyLocationVersionLength));
+                .subList(keyLocationVersionLength - 1, keyLocationVersionLength));
       }
     }
   }
+
+  @Override
+  public OmKeyInfo getPendingDeletionDir() throws IOException {
+    OmKeyInfo omKeyInfo = null;
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+             deletedDirItr = metadataManager.getDeletedDirTable().iterator()) {
+      if (deletedDirItr.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> keyValue = deletedDirItr.next();
+        if (keyValue != null) {
+          omKeyInfo = keyValue.getValue();
+        }
+      }
+    }
+    return omKeyInfo;
+  }
+
+  @Override
+  public List<OmKeyInfo> getPendingDeletionSubDirs(OmKeyInfo parentInfo,
+      long numEntries) throws IOException {
+    List<OmKeyInfo> directories = new ArrayList<>();
+    String seekDirInDB = metadataManager.getOzonePathKey(
+        parentInfo.getObjectID(), "");
+    long countEntries = 0;
+
+    Table dirTable = metadataManager.getDirectoryTable();
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
+          parentInfo.getObjectID())) {
+        break;
+      }
+      String dirName = OMFileRequest.getAbsolutePath(parentInfo.getKeyName(),
+          dirInfo.getName());
+      OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(
+          parentInfo.getVolumeName(), parentInfo.getBucketName(), dirInfo,
+          dirName);
+      directories.add(omKeyInfo);
+      countEntries++;
+
+      // move to next entry in the DirTable
+      iterator.next();
+    }
+
+    return directories;
+  }
+
+  @Override
+  public List<OmKeyInfo> getPendingDeletionSubFiles(OmKeyInfo parentInfo,
+      long numEntries) throws IOException {
+    List<OmKeyInfo> files = new ArrayList<>();
+    String seekFileInDB = metadataManager.getOzonePathKey(
+        parentInfo.getObjectID(), "");
+    long countEntries = 0;
+
+    Table fileTable = metadataManager.getKeyTable();
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        iterator = fileTable.iterator();
+
+    iterator.seek(seekFileInDB);
+
+    while (iterator.hasNext() && numEntries - countEntries > 0) {
+      OmKeyInfo fileInfo = iterator.value().getValue();
+      if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
+          parentInfo.getObjectID())) {
+        break;
+      }
+      fileInfo.setFileName(fileInfo.getKeyName());
+      String fullKeyPath = OMFileRequest.getAbsolutePath(
+          parentInfo.getKeyName(), fileInfo.getKeyName());
+      fileInfo.setKeyName(fullKeyPath);
+
+      files.add(fileInfo);
+      countEntries++;
+      // move to next entry in the KeyTable
+      iterator.next();
+    }
+
+    return files;
+  }
 }
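
Because the DirectoryDeletingService counters are @VisibleForTesting, the whole pipeline can be asserted roughly as below. A hedged test-style sketch: `ozoneManager` and `expectedFiles` are assumed fixtures from the surrounding test.

```java
import org.apache.hadoop.test.GenericTestUtils;

// Hedged sketch: after fs.delete(dir, true) on an FSO bucket, the dir lands
// in the deletedDirTable and the background service drains it over passes.
DirectoryDeletingService svc = (DirectoryDeletingService)
    ozoneManager.getKeyManager().getDirDeletingService();
GenericTestUtils.waitFor(
    () -> svc.getDeletedDirsCount() >= 1
        && svc.getMovedFilesCount() >= expectedFiles,
    500, 60000);   // poll every 500 ms, for up to 60 s
```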
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index 7ce0a16..cce545f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -154,6 +154,8 @@
   private @Metric MutableCounterLong numTrashFails;
   private @Metric MutableCounterLong numTrashRootsEnqueued;
   private @Metric MutableCounterLong numTrashRootsProcessed;
+  private @Metric MutableCounterLong numTrashAtomicDirRenames;
+  private @Metric MutableCounterLong numTrashAtomicDirDeletes;
 
   private final DBCheckpointMetrics dbCheckpointMetrics;
 
@@ -909,6 +911,13 @@
     return numTrashFilesDeletes.value();
   }
 
+  public long getNumTrashAtomicDirRenames() {
+    return numTrashAtomicDirRenames.value();
+  }
+
+  public long getNumTrashAtomicDirDeletes() {
+    return numTrashAtomicDirDeletes.value();
+  }
 
   public void incNumTrashActiveCycles() {
     numTrashActiveCycles.incr();
@@ -926,6 +935,14 @@
     numTrashFails.incr();
   }
 
+  public void incNumTrashAtomicDirRenames() {
+    numTrashAtomicDirRenames.incr();
+  }
+
+  public void incNumTrashAtomicDirDeletes() {
+    numTrashAtomicDirDeletes.incr();
+  }
+
   public void unRegister() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     ms.unregisterSource(SOURCE_NAME);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 7d6a43b..fa387f8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
@@ -60,6 +61,7 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -73,6 +75,7 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.storage.proto
     .OzoneManagerStorageProtos.PersistedUserVolumeInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -103,6 +106,9 @@
    * OM DB stores metadata as KV pairs in different column families.
    * <p>
    * OM DB Schema:
+   *
+   * Common Tables:
    * |----------------------------------------------------------------------|
    * |  Column Family     |        VALUE                                    |
    * |----------------------------------------------------------------------|
@@ -112,23 +118,41 @@
    * |----------------------------------------------------------------------|
    * | bucketTable        |     /volume/bucket-> BucketInfo                 |
    * |----------------------------------------------------------------------|
-   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo         |
-   * |----------------------------------------------------------------------|
-   * | deletedTable       | /volumeName/bucketName/keyName->RepeatedKeyInfo |
-   * |----------------------------------------------------------------------|
-   * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
-   * |----------------------------------------------------------------------|
    * | s3SecretTable      | s3g_access_key_id -> s3Secret                   |
    * |----------------------------------------------------------------------|
    * | dTokenTable        | OzoneTokenID -> renew_time                      |
    * |----------------------------------------------------------------------|
    * | prefixInfoTable    | prefix -> PrefixInfo                            |
    * |----------------------------------------------------------------------|
-   * |  multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->...   |
+   * | multipartInfoTable | /volumeName/bucketName/keyName/uploadId ->...   |
    * |----------------------------------------------------------------------|
+   * | transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo         |
    * |----------------------------------------------------------------------|
-   * |  transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo        |
+   *
+   * Simple Tables:
    * |----------------------------------------------------------------------|
+   * |  Column Family     |        VALUE                                    |
+   * |----------------------------------------------------------------------|
+   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo         |
+   * |----------------------------------------------------------------------|
+   * | deletedTable       | /volumeName/bucketName/keyName->RepeatedKeyInfo |
+   * |----------------------------------------------------------------------|
+   * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
+   * |----------------------------------------------------------------------|
+   *
+   * Prefix Tables:
+   * |----------------------------------------------------------------------|
+   * |  Column Family     |        VALUE                                    |
+   * |----------------------------------------------------------------------|
+   * |  directoryTable    | parentId/directoryName -> DirectoryInfo         |
+   * |----------------------------------------------------------------------|
+   * |  fileTable         | parentId/fileName -> KeyInfo                    |
+   * |----------------------------------------------------------------------|
+   * |  openFileTable     | parentId/fileName/id -> KeyInfo                 |
+   * |----------------------------------------------------------------------|
+   * |  deletedDirTable   | parentId/directoryName -> KeyInfo               |
+   * |----------------------------------------------------------------------|
+   *
    */
 
   public static final String USER_TABLE = "userTable";
@@ -141,6 +165,10 @@
   public static final String S3_SECRET_TABLE = "s3SecretTable";
   public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
   public static final String PREFIX_TABLE = "prefixTable";
+  public static final String DIRECTORY_TABLE = "directoryTable";
+  public static final String FILE_TABLE = "fileTable";
+  public static final String OPEN_FILE_TABLE = "openFileTable";
+  public static final String DELETED_DIR_TABLE = "deletedDirectoryTable";
   public static final String TRANSACTION_INFO_TABLE =
       "transactionInfoTable";
 
@@ -159,9 +187,13 @@
   private Table s3SecretTable;
   private Table dTokenTable;
   private Table prefixTable;
+  private Table dirTable;
+  private Table fileTable;
+  private Table openFileTable;
   private Table transactionInfoTable;
   private boolean isRatisEnabled;
   private boolean ignorePipelineinKey;
+  private Table deletedDirTable;
 
   // Epoch is used to generate the objectIDs. The most significant 2 bits of
   // objectIDs is set to this epoch. For clusters before HDDS-4315 there is
@@ -197,7 +229,8 @@
    * For subclass overriding.
    */
   protected OmMetadataManagerImpl() {
-    this.lock = new OzoneManagerLock(new OzoneConfiguration());
+    OzoneConfiguration conf = new OzoneConfiguration();
+    this.lock = new OzoneManagerLock(conf);
     this.openKeyExpireThresholdMS =
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
     this.omEpoch = 0;
@@ -225,6 +258,9 @@
 
   @Override
   public Table<String, OmKeyInfo> getKeyTable() {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      return fileTable;
+    }
     return keyTable;
   }
 
@@ -234,7 +270,15 @@
   }
 
   @Override
+  public Table<String, OmKeyInfo> getDeletedDirTable() {
+    return deletedDirTable;
+  }
+
+  @Override
   public Table<String, OmKeyInfo> getOpenKeyTable() {
+    if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+      return openFileTable;
+    }
     return openKeyTable;
   }
 
@@ -244,6 +288,11 @@
   }
 
   @Override
+  public Table<String, OmDirectoryInfo> getDirectoryTable() {
+    return dirTable;
+  }
+
+  @Override
   public Table<String, OmMultipartKeyInfo> getMultipartInfoTable() {
     return multipartInfoTable;
   }
@@ -335,6 +384,10 @@
         .addTable(DELEGATION_TOKEN_TABLE)
         .addTable(S3_SECRET_TABLE)
         .addTable(PREFIX_TABLE)
+        .addTable(DIRECTORY_TABLE)
+        .addTable(FILE_TABLE)
+        .addTable(OPEN_FILE_TABLE)
+        .addTable(DELETED_DIR_TABLE)
         .addTable(TRANSACTION_INFO_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -346,7 +399,8 @@
         .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
         .addCodec(S3SecretValue.class, new S3SecretValueCodec())
         .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec())
-        .addCodec(TransactionInfo.class, new TransactionInfoCodec());
+        .addCodec(TransactionInfo.class, new TransactionInfoCodec())
+        .addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec());
   }
 
   /**
@@ -400,6 +454,22 @@
         OmPrefixInfo.class);
     checkTableStatus(prefixTable, PREFIX_TABLE);
 
+    dirTable = this.store.getTable(DIRECTORY_TABLE, String.class,
+            OmDirectoryInfo.class);
+    checkTableStatus(dirTable, DIRECTORY_TABLE);
+
+    fileTable = this.store.getTable(FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(fileTable, FILE_TABLE);
+
+    openFileTable = this.store.getTable(OPEN_FILE_TABLE, String.class,
+            OmKeyInfo.class);
+    checkTableStatus(openFileTable, OPEN_FILE_TABLE);
+
+    deletedDirTable = this.store.getTable(DELETED_DIR_TABLE, String.class,
+        OmKeyInfo.class);
+    checkTableStatus(deletedDirTable, DELETED_DIR_TABLE);
+
     transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
         String.class, TransactionInfo.class);
     checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1165,4 +1235,31 @@
     return tableMap.keySet();
   }
 
+  @Override
+  public String getOzonePathKey(long parentObjectId, String pathComponentName) {
+    StringBuilder builder = new StringBuilder();
+    builder.append(parentObjectId);
+    builder.append(OM_KEY_PREFIX).append(pathComponentName);
+    return builder.toString();
+  }
+
+  @Override
+  public String getOpenFileName(long parentID, String fileName,
+                                long id) {
+    StringBuilder openKey = new StringBuilder();
+    openKey.append(parentID);
+    openKey.append(OM_KEY_PREFIX).append(fileName);
+    openKey.append(OM_KEY_PREFIX).append(id);
+    return openKey.toString();
+  }
+
+  @Override
+  public String getMultipartKey(long parentID, String fileName,
+                                String uploadId) {
+    StringBuilder openKey = new StringBuilder();
+    openKey.append(parentID);
+    openKey.append(OM_KEY_PREFIX).append(fileName);
+    openKey.append(OM_KEY_PREFIX).append(uploadId);
+    return openKey.toString();
+  }
 }
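
The three key builders above all produce flat `parentObjectId/name[/suffix]` strings. A small sketch of the resulting table keys for a path `/a/b/c.txt`, with made-up object IDs (`OM_KEY_PREFIX` is the usual `/`):

```java
// OM_KEY_PREFIX is "/" in Ozone; shown here as a local constant.
public class PrefixKeyFormatSketch {
  private static final String OM_KEY_PREFIX = "/";

  static String ozonePathKey(long parentObjectId, String name) {
    return parentObjectId + OM_KEY_PREFIX + name;
  }

  static String openFileName(long parentId, String fileName, long clientId) {
    return parentId + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + clientId;
  }

  public static void main(String[] args) {
    // Made-up object IDs: directory /a has ID 1025, /a/b has ID 1026.
    System.out.println(ozonePathKey(1025, "b"));         // "1025/b"        -> directoryTable
    System.out.println(ozonePathKey(1026, "c.txt"));     // "1026/c.txt"    -> fileTable
    System.out.println(openFileName(1026, "c.txt", 77)); // "1026/c.txt/77" -> openFileTable
  }
}
```
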
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 569c127..f27beda 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -83,6 +83,8 @@
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
 import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.io.Text;
@@ -220,6 +222,9 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_AUTH_TYPE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METADATA_LAYOUT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METADATA_LAYOUT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -350,6 +355,8 @@
 
   private Thread emptier;
 
+  private static final int MSECS_PER_MINUTE = 60 * 1000;
+
   @SuppressWarnings("methodlength")
   private OzoneManager(OzoneConfiguration conf) throws IOException,
       AuthenticationException {
@@ -1098,18 +1105,23 @@
    * Start service.
    */
   public void start() throws IOException {
+    initFSOLayout();
+
     omClientProtocolMetrics.register();
     HddsServerUtil.initializeMetrics(configuration, "OzoneManager");
 
     LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
         omRpcAddress));
 
+    metadataManager.start(configuration);
+
+    validateBucketLayoutMismatches();
+
     // Start Ratis services
     if (omRatisServer != null) {
       omRatisServer.start();
     }
 
-    metadataManager.start(configuration);
     startSecretManagerIfNecessary();
 
 
@@ -1163,6 +1175,8 @@
    * Restarts the service. This method re-initializes the rpc server.
    */
   public void restart() throws IOException {
+    initFSOLayout();
+
     LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
         omRpcAddress));
 
@@ -1170,6 +1184,8 @@
 
     instantiateServices();
 
+    validateBucketLayoutMismatches();
+
     startSecretManagerIfNecessary();
 
     // Set metrics and start metrics back ground thread
@@ -1223,13 +1239,14 @@
    * @throws IOException
    */
   private void startTrashEmptier(Configuration conf) throws IOException {
-    long hadoopTrashInterval =
-        conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
+    float hadoopTrashInterval =
+        conf.getFloat(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
     // check whether user has configured ozone specific trash-interval
     // if not fall back to hadoop configuration
     long trashInterval =
-            conf.getLong(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY,
-                hadoopTrashInterval);
+        (long)(conf.getFloat(
+            OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, hadoopTrashInterval)
+            * MSECS_PER_MINUTE);
     if (trashInterval == 0) {
       LOG.info("Trash Interval set to 0. Files deleted will not move to trash");
       return;
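
The switch from `getLong` to `getFloat` above lets operators configure fractional trash intervals; the configured value is interpreted as minutes and converted to milliseconds. A minimal sketch of that arithmetic:

```java
// Minutes (possibly fractional) to milliseconds, mirroring the logic above.
public class TrashIntervalSketch {
  private static final int MSECS_PER_MINUTE = 60 * 1000;

  static long toIntervalMillis(float minutes) {
    return (long) (minutes * MSECS_PER_MINUTE);
  }

  public static void main(String[] args) {
    System.out.println(toIntervalMillis(1.0f)); // 60000
    System.out.println(toIntervalMillis(0.5f)); // 30000 -- only possible with getFloat
    System.out.println(toIntervalMillis(0.0f)); // 0 -> the emptier is not started
  }
}
```
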
@@ -1766,6 +1783,21 @@
         .setAclRights(aclType)
         .setOwnerName(volumeOwner)
         .build();
+
+    return checkAcls(obj, context, throwIfPermissionDenied);
+  }
+
+  /**
+   * Checks ACLs for the given ozone object.
+   *
+   * @return true if permission is granted, false if permission is denied.
+   * @throws OMException ResultCodes.PERMISSION_DENIED if permission is denied
+   *                     and throwIfPermissionDenied is set to true.
+   */
+  public boolean checkAcls(OzoneObj obj, RequestContext context,
+                           boolean throwIfPermissionDenied)
+      throws OMException {
+
     if (!accessAuthorizer.checkAccess(obj, context)) {
       if (throwIfPermissionDenied) {
         LOG.warn("User {} doesn't have {} permission to access {} /{}/{}/{}",
@@ -1785,6 +1817,8 @@
     }
   }
 
+
   /**
    * Return true if Ozone acl's are enabled, else false.
    *
@@ -3710,6 +3744,11 @@
         OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
   }
 
+  public String getOMMetadataLayout() {
+    return configuration
+        .getTrimmed(OZONE_OM_METADATA_LAYOUT, OZONE_OM_METADATA_LAYOUT_DEFAULT);
+  }
+
   /**
    * Create volume which is required for S3Gateway operations.
    * @throws IOException
@@ -3816,4 +3855,81 @@
     this.minMultipartUploadPartSize = partSizeForTest;
   }
 
+  private void initFSOLayout() {
+    // TODO: Temporary workaround for OM upgrade path and will be replaced once
+    //  upgrade HDDS-3698 story reaches consensus. Instead of cluster level
+    //  configuration, OM needs to check this property on every bucket level.
+    String metaLayout = getOMMetadataLayout();
+    boolean omMetadataLayoutPrefix = StringUtils.equalsIgnoreCase(metaLayout,
+        OZONE_OM_METADATA_LAYOUT_PREFIX);
+
+    boolean omMetadataLayoutSimple = StringUtils.equalsIgnoreCase(metaLayout,
+        OZONE_OM_METADATA_LAYOUT_DEFAULT);
+
+    if (!(omMetadataLayoutPrefix || omMetadataLayoutSimple)) {
+      StringBuilder msg = new StringBuilder();
+      msg.append("Invalid Configuration. Failed to start OM in ");
+      msg.append(metaLayout);
+      msg.append(" layout format. Supported values are either ");
+      msg.append(OZONE_OM_METADATA_LAYOUT_DEFAULT);
+      msg.append(" or ");
+      msg.append(OZONE_OM_METADATA_LAYOUT_PREFIX);
+
+      LOG.error(msg.toString());
+      throw new IllegalArgumentException(msg.toString());
+    }
+
+    if (omMetadataLayoutPrefix && !getEnableFileSystemPaths()) {
+      StringBuilder msg = new StringBuilder();
+      msg.append("Invalid Configuration. Failed to start OM in ");
+      msg.append(OZONE_OM_METADATA_LAYOUT_PREFIX);
+      msg.append(" layout format as '");
+      msg.append(OZONE_OM_ENABLE_FILESYSTEM_PATHS);
+      msg.append("' is false!");
+
+      LOG.error(msg.toString());
+      throw new IllegalArgumentException(msg.toString());
+    }
+
+    OzoneManagerRatisUtils.setBucketFSOptimized(omMetadataLayoutPrefix);
+    String status = omMetadataLayoutPrefix ? "enabled" : "disabled";
+    LOG.info("Configured {}={} and {} optimized OM FS operations",
+        OZONE_OM_METADATA_LAYOUT, metaLayout, status);
+  }
+
+  private void validateBucketLayoutMismatches() throws IOException {
+    String clusterLevelMetaLayout = getOMMetadataLayout();
+
+    TableIterator<String, ? extends Table.KeyValue<String, OmBucketInfo>>
+        iterator = metadataManager.getBucketTable().iterator();
+
+    while (iterator.hasNext()) {
+      Map<String, String> bucketMeta = iterator.next().getValue().getMetadata();
+      verifyBucketMetaLayout(clusterLevelMetaLayout, bucketMeta);
+    }
+  }
+
+  private void verifyBucketMetaLayout(String clusterLevelMetaLayout,
+      Map<String, String> bucketMetadata) throws IOException {
+    String bucketMetaLayout = bucketMetadata.get(OZONE_OM_METADATA_LAYOUT);
+    if (StringUtils.isBlank(bucketMetaLayout)) {
+      // Defaulting to SIMPLE
+      bucketMetaLayout = OZONE_OM_METADATA_LAYOUT_DEFAULT;
+    }
+    boolean supportedMetadataLayout =
+        StringUtils.equalsIgnoreCase(clusterLevelMetaLayout, bucketMetaLayout);
+
+    if (!supportedMetadataLayout) {
+      StringBuilder msg = new StringBuilder();
+      msg.append("Failed to start OM in ");
+      msg.append(clusterLevelMetaLayout);
+      msg.append(" layout format as existing bucket has a different layout ");
+      msg.append(bucketMetaLayout);
+      msg.append(" metadata format");
+
+      LOG.error(msg.toString());
+      throw new IOException(msg.toString());
+    }
+  }
+
 }
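
A condensed sketch of the startup validation above: the cluster-level layout must match what every existing bucket recorded in its metadata, with absent metadata treated as SIMPLE (the names and exception type below are illustrative, not the OM's):

```java
import java.util.HashMap;
import java.util.Map;

// Stand-in for the check above: every existing bucket's recorded layout must
// match the cluster-level ozone.om.metadata.layout value; buckets with no
// recorded layout are treated as SIMPLE.
public class LayoutCheckSketch {
  static void verify(String clusterLayout, Map<String, String> bucketMeta) {
    String bucketLayout =
        bucketMeta.getOrDefault("ozone.om.metadata.layout", "SIMPLE");
    if (!clusterLayout.equalsIgnoreCase(bucketLayout)) {
      throw new IllegalStateException("Bucket layout " + bucketLayout
          + " conflicts with cluster layout " + clusterLayout);
    }
  }

  public static void main(String[] args) {
    Map<String, String> legacyBucket = new HashMap<>(); // created pre-PREFIX
    verify("SIMPLE", legacyBucket);                     // passes
    verify("PREFIX", legacyBucket);                     // throws: OM must not start
  }
}
```
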
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
new file mode 100644
index 0000000..1172d12
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+
+/**
+ * Implementation of OzonePrefixPath interface.
+ */
+public class OzonePrefixPathImpl implements OzonePrefixPath {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzonePrefixPathImpl.class);
+  private String volumeName;
+  private String bucketName;
+  private KeyManager keyManager;
+  // TODO: make batchSize configurable if needed.
+  private int batchSize = 1000;
+  private OzoneFileStatus pathStatus;
+
+  public OzonePrefixPathImpl(String volumeName, String bucketName,
+      String keyPrefix, KeyManager keyManagerImpl) throws IOException {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.keyManager = keyManagerImpl;
+
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyPrefix)
+        .setRefreshPipeline(false)
+        .build();
+    try {
+      pathStatus = keyManager.getFileStatus(omKeyArgs);
+    } catch (OMException ome) {
+      // In the existing non-FSO code path, ozone client delete and rename
+      // expect KEY_NOT_FOUND, so convert FILE_NOT_FOUND to KEY_NOT_FOUND.
+      if (ome.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) {
+        throw new OMException(ome.getMessage(), KEY_NOT_FOUND);
+      }
+      throw ome;
+    }
+  }
+
+  @Override
+  public OzoneFileStatus getOzoneFileStatus() {
+    return pathStatus;
+  }
+
+  @Override
+  public Iterator<? extends OzoneFileStatus> getChildren(String keyPrefix)
+      throws IOException {
+
+    return new PathIterator(keyPrefix);
+  }
+
+  class PathIterator implements Iterator<OzoneFileStatus> {
+    private Iterator<OzoneFileStatus> currentIterator;
+    private String keyPrefix;
+    private OzoneFileStatus currentValue;
+
+    /**
+     * Creates an iterator over all sub-paths of the given keyPrefix.
+     *
+     * @param keyPrefix prefix (directory) whose children are iterated
+     */
+    PathIterator(String keyPrefix) throws IOException {
+      this.keyPrefix = keyPrefix;
+      this.currentValue = null;
+      List<OzoneFileStatus> statuses = getNextListOfKeys("");
+      if (statuses.size() == 1) {
+        OzoneFileStatus keyStatus = statuses.get(0);
+        if (keyStatus.isFile() && StringUtils.equals(keyPrefix,
+            keyStatus.getTrimmedName())) {
+          throw new OMException("Invalid keyPrefix: " + keyPrefix +
+              ", file type is not allowed, expected directory type.",
+              OMException.ResultCodes.INVALID_KEY_NAME);
+        }
+      }
+      this.currentIterator = statuses.iterator();
+    }
+
+    @Override
+    public boolean hasNext() {
+      if (!currentIterator.hasNext() && currentValue != null) {
+        String keyName = "";
+        try {
+          keyName = currentValue.getTrimmedName();
+          currentIterator =
+              getNextListOfKeys(keyName).iterator();
+        } catch (IOException e) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Exception while listing keys, keyName:" + keyName, e);
+          }
+          return false;
+        }
+      }
+      return currentIterator.hasNext();
+    }
+
+    @Override
+    public OzoneFileStatus next() {
+      if (hasNext()) {
+        currentValue = currentIterator.next();
+        return currentValue;
+      }
+      throw new NoSuchElementException();
+    }
+
+    /**
+     * Fetches the next batch of keys through the KeyManager interface.
+     *
+     * @param prevKey key to continue listing from; "" starts at the beginning
+     * @return {@code List<OzoneFileStatus>}
+     */
+    List<OzoneFileStatus> getNextListOfKeys(String prevKey) throws
+        IOException {
+
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(keyPrefix)
+          .setRefreshPipeline(false)
+          .build();
+
+      List<OzoneFileStatus> statuses = keyManager.listStatus(omKeyArgs, false,
+          prevKey, batchSize);
+
+      // listStatus with a non-empty startKey returns startKey itself as the
+      // first element of the result list; drop it to avoid a duplicate entry.
+      if (!statuses.isEmpty() && StringUtils.equals(prevKey,
+          statuses.get(0).getTrimmedName())) {
+        statuses.remove(0);
+      }
+      return statuses;
+    }
+  }
+}
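
`PathIterator` above is an instance of a common paging pattern: pull one batch, hand entries out one at a time, then use the last returned entry as the continuation key for the next batch. A generic, self-contained sketch of that shape (plain Java, not the Ozone classes themselves):

```java
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.BiFunction;

// Generic shape of PathIterator: fetch a page, drain it, refill from the
// last returned entry when it runs dry.
public class BatchingIteratorSketch implements Iterator<String> {
  private final BiFunction<String, Integer, List<String>> lister; // (prevKey, batchSize) -> page
  private final int batchSize;
  private Iterator<String> page;
  private String lastReturned;

  BatchingIteratorSketch(BiFunction<String, Integer, List<String>> lister,
      int batchSize) {
    this.lister = lister;
    this.batchSize = batchSize;
    this.page = lister.apply("", batchSize).iterator();
  }

  @Override
  public boolean hasNext() {
    if (!page.hasNext() && lastReturned != null) {
      // Refill; the lister must drop the echoed start key, as listStatus does.
      page = lister.apply(lastReturned, batchSize).iterator();
    }
    return page.hasNext();
  }

  @Override
  public String next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    lastReturned = page.next();
    return lastReturned;
  }

  public static void main(String[] args) {
    List<String> keys = List.of("a", "b", "c", "d", "e");
    BatchingIteratorSketch it = new BatchingIteratorSketch((prev, n) -> {
      int from = prev.isEmpty() ? 0 : keys.indexOf(prev) + 1; // drop echoed key
      return keys.subList(from, Math.min(from + n, keys.size()));
    }, 2);
    it.forEachRemaining(System.out::println); // prints a..e across three batches
  }
}
```
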
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index c64df9c..e9645e9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -35,10 +35,11 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
 import org.apache.ratis.protocol.ClientId;
@@ -154,6 +155,11 @@
     // check whether the src and dst belong to the same bucket & trashroot.
     OFSPath srcPath = new OFSPath(src);
     OFSPath dstPath = new OFSPath(dst);
+    OmBucketInfo bucket = ozoneManager.getBucketInfo(srcPath.getVolumeName(),
+        srcPath.getBucketName());
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return renameFSO(srcPath, dstPath);
+    }
     Preconditions.checkArgument(srcPath.getBucketName().
         equals(dstPath.getBucketName()));
     Preconditions.checkArgument(srcPath.getTrashRoot().
@@ -163,14 +169,52 @@
     return true;
   }
 
+  private boolean renameFSO(OFSPath srcPath, OFSPath dstPath) {
+    ozoneManager.getMetrics().incNumTrashAtomicDirRenames();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        getRenameKeyRequest(srcPath, dstPath);
+    try {
+      if (omRequest != null) {
+        submitRequest(omRequest);
+        return true;
+      }
+      return false;
+    } catch (Exception e){
+      LOG.error("Couldn't send rename request", e);
+      return false;
+    }
+  }
+
   @Override
   public boolean delete(Path path, boolean b) throws IOException {
     ozoneManager.getMetrics().incNumTrashDeletes();
+    OFSPath srcPath = new OFSPath(path);
+    OmBucketInfo bucket = ozoneManager.getBucketInfo(srcPath.getVolumeName(),
+        srcPath.getBucketName());
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return deleteFSO(srcPath);
+    }
     DeleteIterator iterator = new DeleteIterator(path, true);
     iterator.iterate();
     return true;
   }
 
+  private boolean deleteFSO(OFSPath srcPath) {
+    ozoneManager.getMetrics().incNumTrashAtomicDirDeletes();
+    OzoneManagerProtocolProtos.OMRequest omRequest =
+        getDeleteKeyRequest(srcPath);
+    try {
+      if (omRequest != null) {
+        submitRequest(omRequest);
+        return true;
+      }
+      return false;
+    } catch (Throwable e) {
+      LOG.error("Couldn't send delete request.", e);
+      return false;
+    }
+  }
+
   @Override
   public FileStatus[] listStatus(Path path) throws  IOException {
     ozoneManager.getMetrics().incNumTrashListStatus();
@@ -377,6 +421,41 @@
     }
   }
 
+
+  private OzoneManagerProtocolProtos.OMRequest getRenameKeyRequest(
+      OFSPath src, OFSPath dst) {
+    String volumeName = src.getVolumeName();
+    String bucketName = src.getBucketName();
+    String keyName = src.getKeyName();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+            .setKeyName(keyName)
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .build();
+    String toKeyName = dst.getKeyName();
+    OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
+        OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
+            .setKeyArgs(keyArgs)
+            .setToKeyName(toKeyName)
+            .build();
+    OzoneManagerProtocolProtos.OMRequest omRequest = null;
+    try {
+      omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
+              .setClientId(CLIENT_ID.toString())
+              .setUserInfo(getUserInfo())
+              .setRenameKeyRequest(renameKeyRequest)
+              .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+              .build();
+    } catch (IOException e) {
+      LOG.error("Couldn't get userinfo", e);
+    }
+    return omRequest;
+  }
+
   private class RenameIterator extends OzoneListingIterator {
     private final String srcPath;
     private final String dstPath;
@@ -408,40 +487,37 @@
       }
       return true;
     }
+  }
 
-    private OzoneManagerProtocolProtos.OMRequest
-        getRenameKeyRequest(
-        OFSPath src, OFSPath dst) {
-      String volumeName = src.getVolumeName();
-      String bucketName = src.getBucketName();
-      String keyName = src.getKeyName();
-
-      OzoneManagerProtocolProtos.KeyArgs keyArgs =
-          OzoneManagerProtocolProtos.KeyArgs.newBuilder()
-              .setKeyName(keyName)
-              .setVolumeName(volumeName)
-              .setBucketName(bucketName)
-              .build();
-      String toKeyName = dst.getKeyName();
-      OzoneManagerProtocolProtos.RenameKeyRequest renameKeyRequest =
-          OzoneManagerProtocolProtos.RenameKeyRequest.newBuilder()
-              .setKeyArgs(keyArgs)
-              .setToKeyName(toKeyName)
-              .build();
-      OzoneManagerProtocolProtos.OMRequest omRequest =
-          null;
-      try {
-        omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder()
-            .setClientId(CLIENT_ID.toString())
-            .setUserInfo(getUserInfo())
-            .setRenameKeyRequest(renameKeyRequest)
-            .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey)
+  private OzoneManagerProtocolProtos.OMRequest getDeleteKeyRequest(
+      OFSPath srcPath) {
+    String volume = srcPath.getVolumeName();
+    String bucket = srcPath.getBucketName();
+    String key = srcPath.getKeyName();
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
+            .setKeyName(key)
+            .setVolumeName(volume)
+            .setBucketName(bucket)
+            .setRecursive(true)
             .build();
-      } catch (IOException e) {
-        LOG.error("Couldn't get userinfo", e);
-      }
-      return omRequest;
+    OzoneManagerProtocolProtos.DeleteKeyRequest deleteKeyRequest =
+        OzoneManagerProtocolProtos.DeleteKeyRequest.newBuilder()
+            .setKeyArgs(keyArgs).build();
+    OzoneManagerProtocolProtos.OMRequest omRequest = null;
+    try {
+      omRequest =
+          OzoneManagerProtocolProtos.OMRequest.newBuilder()
+              .setClientId(CLIENT_ID.toString())
+              .setUserInfo(getUserInfo())
+              .setDeleteKeyRequest(deleteKeyRequest)
+              .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
+              .build();
+    } catch (IOException e) {
+      LOG.error("Couldn't get userinfo", e);
     }
+    return omRequest;
   }
 
   private class DeleteIterator extends OzoneListingIterator {
@@ -467,7 +543,7 @@
       for (String keyPath : keyPathList) {
         OFSPath path = new OFSPath(keyPath);
         OzoneManagerProtocolProtos.OMRequest omRequest =
-            getDeleteKeyRequest(path);
+            getDeleteKeysRequest(path);
         try {
           ozoneManager.getMetrics().incNumTrashFilesDeletes();
           submitRequest(omRequest);
@@ -479,7 +555,7 @@
     }
 
     private OzoneManagerProtocolProtos.OMRequest
-        getDeleteKeyRequest(
+        getDeleteKeysRequest(
         OFSPath keyPath) {
       String volumeName = keyPath.getVolumeName();
       String bucketName = keyPath.getBucketName();
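
For reference, this is the shape of the recursive-delete request that `deleteFSO(...)` above submits. The builder calls mirror the diff exactly; the volume/bucket/key literals and the `clientId`/`userInfo` parameters are placeholders:

```java
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;

final class DeleteRequestSketch {
  static OzoneManagerProtocolProtos.OMRequest buildRecursiveDelete(
      String clientId, OzoneManagerProtocolProtos.UserInfo userInfo) {
    OzoneManagerProtocolProtos.KeyArgs keyArgs =
        OzoneManagerProtocolProtos.KeyArgs.newBuilder()
            .setVolumeName("vol1")                   // placeholder volume
            .setBucketName("bucket1")                // placeholder bucket
            .setKeyName(".Trash/user1/Current/dir1") // placeholder key
            .setRecursive(true) // one atomic sub-tree delete, no per-key fan-out
            .build();
    OzoneManagerProtocolProtos.DeleteKeyRequest deleteKeyRequest =
        OzoneManagerProtocolProtos.DeleteKeyRequest.newBuilder()
            .setKeyArgs(keyArgs)
            .build();
    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
        .setClientId(clientId)
        .setUserInfo(userInfo)
        .setDeleteKeyRequest(deleteKeyRequest)
        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
        .build();
  }
}
```
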
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 6e30ca4..fd1579c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -143,6 +144,39 @@
                     TransactionInfo.class,
                     new TransactionInfoCodec());
 
+  public static final DBColumnFamilyDefinition<String, OmDirectoryInfo>
+            DIRECTORY_TABLE =
+            new DBColumnFamilyDefinition<>(
+                    OmMetadataManagerImpl.DIRECTORY_TABLE,
+                    String.class,
+                    new StringCodec(),
+                    OmDirectoryInfo.class,
+                    new OmDirectoryInfoCodec());
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+            FILE_TABLE =
+            new DBColumnFamilyDefinition<>(
+                    OmMetadataManagerImpl.FILE_TABLE,
+                    String.class,
+                    new StringCodec(),
+                    OmKeyInfo.class,
+                    new OmKeyInfoCodec(true));
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+            OPEN_FILE_TABLE =
+            new DBColumnFamilyDefinition<>(
+                  OmMetadataManagerImpl.OPEN_FILE_TABLE,
+                  String.class,
+                  new StringCodec(),
+                  OmKeyInfo.class,
+                  new OmKeyInfoCodec(true));
+
+  public static final DBColumnFamilyDefinition<String, OmKeyInfo>
+      DELETED_DIR_TABLE =
+      new DBColumnFamilyDefinition<>(OmMetadataManagerImpl.DELETED_DIR_TABLE,
+          String.class, new StringCodec(), OmKeyInfo.class,
+          new OmKeyInfoCodec(true));
+
   @Override
   public String getName() {
     return OzoneConsts.OM_DB_NAME;
@@ -158,7 +192,8 @@
     return new DBColumnFamilyDefinition[] {DELETED_TABLE, USER_TABLE,
         VOLUME_TABLE, OPEN_KEY_TABLE, KEY_TABLE,
         BUCKET_TABLE, MULTIPART_INFO_TABLE, PREFIX_TABLE, DTOKEN_TABLE,
-        S3_SECRET_TABLE, TRANSACTION_INFO_TABLE};
+        S3_SECRET_TABLE, TRANSACTION_INFO_TABLE, DIRECTORY_TABLE,
+        FILE_TABLE, OPEN_FILE_TABLE, DELETED_DIR_TABLE};
   }
 }
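
Each of the four new definitions above binds a column family name to its key and value codecs. A hypothetical extra definition following the same pattern (`exampleTable` is not a real Ozone column family):

```java
// Inside OMDBDefinition (reusing the imports of the file above); purely
// illustrative -- "exampleTable" does not exist in Ozone.
public static final DBColumnFamilyDefinition<String, OmKeyInfo>
    EXAMPLE_TABLE =
    new DBColumnFamilyDefinition<>(
        "exampleTable",            // column family name in om.db
        String.class,
        new StringCodec(),         // key codec
        OmKeyInfo.class,
        new OmKeyInfoCodec(true)); // value codec, same flag as the rows above
```

Any such definition must also be added to the array returned by `getColumnFamilies()` below; otherwise tooling that opens om.db through this definition will not know about the table.
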
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 29a0328..e95cd33 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -32,26 +32,41 @@
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
 import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
+import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMPathsPurgeRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequestWithFSO;
 import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest;
 import org.apache.hadoop.ozone.om.request.s3.security.S3RevokeSecretRequest;
 import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest;
@@ -80,8 +95,23 @@
  */
 public final class OzoneManagerRatisUtils {
 
+  // TODO: Temporary workaround for OM upgrade path and will be replaced once
+  //  upgrade HDDS-3698 story reaches consensus.
+  private static boolean isBucketFSOptimized = false;
+
   private OzoneManagerRatisUtils() {
   }
+
+  /**
+   * Sets the file-system-optimized (FSO) path property: true enables it,
+   * false disables it.
+   *
+   * @param enabledFSO whether the file-system-optimized layout is enabled
+   */
+  public static void setBucketFSOptimized(boolean enabledFSO) {
+    OzoneManagerRatisUtils.isBucketFSOptimized = enabledFSO;
+  }
+
   /**
    * Create OMClientRequest which encapsulates the OMRequest.
    * @param omRequest
@@ -116,32 +146,67 @@
     case SetBucketProperty:
       return new OMBucketSetPropertyRequest(omRequest);
     case AllocateBlock:
+      if (isBucketFSOptimized()) {
+        return new OMAllocateBlockRequestWithFSO(omRequest);
+      }
       return new OMAllocateBlockRequest(omRequest);
     case CreateKey:
+      if (isBucketFSOptimized()) {
+        return new OMKeyCreateRequestWithFSO(omRequest);
+      }
       return new OMKeyCreateRequest(omRequest);
     case CommitKey:
+      if (isBucketFSOptimized()) {
+        return new OMKeyCommitRequestWithFSO(omRequest);
+      }
       return new OMKeyCommitRequest(omRequest);
     case DeleteKey:
+      if (isBucketFSOptimized()) {
+        return new OMKeyDeleteRequestWithFSO(omRequest);
+      }
       return new OMKeyDeleteRequest(omRequest);
     case DeleteKeys:
       return new OMKeysDeleteRequest(omRequest);
     case RenameKey:
+      if (isBucketFSOptimized()) {
+        return new OMKeyRenameRequestWithFSO(omRequest);
+      }
       return new OMKeyRenameRequest(omRequest);
     case RenameKeys:
       return new OMKeysRenameRequest(omRequest);
     case CreateDirectory:
+      if (isBucketFSOptimized()) {
+        return new OMDirectoryCreateRequestWithFSO(omRequest);
+      }
       return new OMDirectoryCreateRequest(omRequest);
     case CreateFile:
+      if (isBucketFSOptimized()) {
+        return new OMFileCreateRequestWithFSO(omRequest);
+      }
       return new OMFileCreateRequest(omRequest);
     case PurgeKeys:
       return new OMKeyPurgeRequest(omRequest);
+    case PurgePaths:
+      return new OMPathsPurgeRequestWithFSO(omRequest);
     case InitiateMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3InitiateMultipartUploadRequestWithFSO(omRequest);
+      }
       return new S3InitiateMultipartUploadRequest(omRequest);
     case CommitMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadCommitPartRequestWithFSO(omRequest);
+      }
       return new S3MultipartUploadCommitPartRequest(omRequest);
     case AbortMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadAbortRequestWithFSO(omRequest);
+      }
       return new S3MultipartUploadAbortRequest(omRequest);
     case CompleteMultiPartUpload:
+      if (isBucketFSOptimized()) {
+        return new S3MultipartUploadCompleteRequestWithFSO(omRequest);
+      }
       return new S3MultipartUploadCompleteRequest(omRequest);
     case AddAcl:
     case RemoveAcl:
@@ -174,6 +239,9 @@
       } else if (ObjectType.BUCKET == type) {
         return new OMBucketAddAclRequest(omRequest);
       } else if (ObjectType.KEY == type) {
+        if (isBucketFSOptimized()) {
+          return new OMKeyAddAclRequestWithFSO(omRequest);
+        }
         return new OMKeyAddAclRequest(omRequest);
       } else {
         return new OMPrefixAddAclRequest(omRequest);
@@ -185,6 +253,9 @@
       } else if (ObjectType.BUCKET == type) {
         return new OMBucketRemoveAclRequest(omRequest);
       } else if (ObjectType.KEY == type) {
+        if (isBucketFSOptimized()) {
+          return new OMKeyRemoveAclRequestWithFSO(omRequest);
+        }
         return new OMKeyRemoveAclRequest(omRequest);
       } else {
         return new OMPrefixRemoveAclRequest(omRequest);
@@ -196,6 +267,9 @@
       } else if (ObjectType.BUCKET == type) {
         return new OMBucketSetAclRequest(omRequest);
       } else if (ObjectType.KEY == type) {
+        if (isBucketFSOptimized()) {
+          return new OMKeySetAclRequestWithFSO(omRequest);
+        }
         return new OMKeySetAclRequest(omRequest);
       } else {
         return new OMPrefixSetAclRequest(omRequest);
@@ -255,4 +329,15 @@
         .verifyTransactionInfo(transactionInfo, lastAppliedIndex, leaderId,
             newDBlocation, OzoneManager.LOG);
   }
+
+  /**
+   * Returns whether the file-system-optimized (FSO) path property is enabled.
+   *
+   * @return true if the FSO layout is enabled, false otherwise.
+   */
+  public static boolean isBucketFSOptimized() {
+    return isBucketFSOptimized;
+  }
+
 }
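
The `createClientRequest(...)` switch above is a layout-aware factory: the same command type maps to either the simple or the FSO request handler depending on the flag. A generic sketch of that dispatch shape (handler names are strings here purely for illustration):

```java
// Generic shape of the dispatch above: one command type, two handlers,
// selected by the bucket layout flag.
public class LayoutDispatchSketch {
  interface Handler {
    String name();
  }

  static Handler create(String cmdType, boolean fsOptimized) {
    switch (cmdType) {
    case "CreateKey":
      return fsOptimized
          ? () -> "OMKeyCreateRequestWithFSO" // writes fileTable / openFileTable
          : () -> "OMKeyCreateRequest";       // writes keyTable / openKeyTable
    case "DeleteKey":
      return fsOptimized
          ? () -> "OMKeyDeleteRequestWithFSO"
          : () -> "OMKeyDeleteRequest";
    default:
      throw new IllegalArgumentException("Unhandled command: " + cmdType);
    }
  }

  public static void main(String[] args) {
    System.out.println(create("CreateKey", true).name());  // FSO variant
    System.out.println(create("CreateKey", false).name()); // simple variant
  }
}
```
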
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 828c9e9..56fff9f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
@@ -38,6 +39,8 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.ozone.security.acl.RequestContext;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -187,6 +190,51 @@
   }
 
   /**
+   * Checks ACLs for the ozone key; for directories the check is applied
+   * recursively over the sub-paths.
+   * @param ozoneManager
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @param aclType
+   * @throws IOException
+   */
+  protected void checkACLs(OzoneManager ozoneManager, String volumeName,
+      String bucketName, String keyName, IAccessAuthorizer.ACLType aclType)
+      throws IOException {
+
+    // TODO: Sub-paths are currently not populated under a single bucket
+    //  lock. Revisit this to handle concurrent operations on the same tree.
+    OzonePrefixPathImpl pathViewer = new OzonePrefixPathImpl(volumeName,
+        bucketName, keyName, ozoneManager.getKeyManager());
+
+    OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
+        .setResType(OzoneObj.ResourceType.KEY)
+        .setStoreType(OzoneObj.StoreType.OZONE)
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setOzonePrefixPath(pathViewer).build();
+
+    boolean isDirectory = pathViewer.getOzoneFileStatus().isDirectory();
+
+    RequestContext.Builder contextBuilder = RequestContext.newBuilder()
+        .setAclRights(aclType)
+        .setRecursiveAccessCheck(isDirectory); // recursive checks for a dir
+
+    // check Acl
+    if (ozoneManager.getAclsEnabled()) {
+      String volumeOwner = ozoneManager.getVolumeOwner(obj.getVolumeName(),
+          contextBuilder.getAclRights(), obj.getResourceType());
+      contextBuilder.setClientUgi(createUGI());
+      contextBuilder.setIp(getRemoteAddress());
+      contextBuilder.setHost(getHostName());
+      contextBuilder.setAclType(IAccessAuthorizer.ACLIdentityType.USER);
+      contextBuilder.setOwnerName(volumeOwner);
+      ozoneManager.checkAcls(obj, contextBuilder.build(), true);
+    }
+  }
+
+  /**
    * Check Acls of ozone object with volOwner given.
    * @param ozoneManager
    * @param resType
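
The `checkACLs` helper above marks directory requests for a recursive access check, because an operation on a directory is safe only if the caller is authorized on every sub-path, which the authorizer can walk lazily through the `OzonePrefixPath` iterator. A conceptual, plain-Java sketch of that all-or-nothing semantics (not the Ozone authorizer API):

```java
import java.util.List;
import java.util.function.Predicate;

// Conceptual sketch: a directory operation is authorized only if the caller
// holds the required ACL on every sub-path.
public class RecursiveAclSketch {
  static boolean mayActOnTree(List<String> subPaths, Predicate<String> allowed) {
    return subPaths.stream().allMatch(allowed);
  }

  public static void main(String[] args) {
    List<String> tree =
        List.of("/vol/buck/d", "/vol/buck/d/f1", "/vol/buck/d/sub/f2");
    System.out.println(mayActOnTree(tree, p -> !p.endsWith("f2"))); // false: one denial blocks the op
    System.out.println(mayActOnTree(tree, p -> true));              // true
  }
}
```
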
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 3f81f40..de09e92 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -20,13 +20,17 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.base.Optional;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -154,6 +158,9 @@
         getOmRequest());
     OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
 
+    // Add metadata layout to bucket info
+    addFSOptimizedBucketDetails(ozoneManager, omBucketInfo);
+
     AuditLogger auditLogger = ozoneManager.getAuditLogger();
     OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
 
@@ -357,4 +364,31 @@
 
   }
 
+  /**
+   * OM can support FS optimization only if both flags are TRUE
+   * (enableFSOptimized=true && enableFSPaths=true); in that case it writes
+   * table key entries in the NEW_FORMAT (prefix-separated format using
+   * objectID). In all other cases it writes table key entries in the
+   * OLD_FORMAT (existing format).
+   *
+   * @param ozoneManager ozone manager
+   * @param omBucketInfo bucket information
+   */
+  private void addFSOptimizedBucketDetails(OzoneManager ozoneManager,
+                                           OmBucketInfo omBucketInfo) {
+    Map<String, String> metadata = omBucketInfo.getMetadata();
+    if (metadata == null) {
+      metadata = new HashMap<>();
+    }
+    // TODO: Many unit tests run with a null config, so only a simple blank
+    //  check is done here; tightening this later avoids massive test changes.
+    if (StringUtils.isNotBlank(ozoneManager.getOMMetadataLayout())) {
+      String metadataLayout = ozoneManager.getOMMetadataLayout();
+      metadata.put(OMConfigKeys.OZONE_OM_METADATA_LAYOUT, metadataLayout);
+      boolean fsPathsEnabled = ozoneManager.getEnableFileSystemPaths();
+      metadata.put(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+              Boolean.toString(fsPathsEnabled));
+      omBucketInfo.setMetadata(metadata);
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index ec05576..a9917e0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -361,4 +361,7 @@
         .setUpdateID(objectId);
   }
 
+  static long getMaxNumOfRecursiveDirs() {
+    return MAX_NUM_OF_RECURSIVE_DIRS;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
new file mode 100644
index 0000000..a9ad85a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
@@ -0,0 +1,314 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .CreateDirectoryResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+        .Status;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*;
+
+/**
+ * Handles the create directory request. It adds missing path components to
+ * the directory table and maintains file system semantics.
+ */
+public class OMDirectoryCreateRequestWithFSO extends OMDirectoryCreateRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMDirectoryCreateRequestWithFSO.class);
+
+  public OMDirectoryCreateRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CreateDirectoryRequest createDirectoryRequest = getOmRequest()
+        .getCreateDirectoryRequest();
+    KeyArgs keyArgs = createDirectoryRequest.getKeyArgs();
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    int numKeysCreated = 0;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    omResponse.setCreateDirectoryResponse(CreateDirectoryResponse.newBuilder());
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumCreateDirectory();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean acquiredLock = false;
+    IOException exception = null;
+    OMClientResponse omClientResponse = null;
+    Result result = Result.FAILURE;
+    List<OmDirectoryInfo> missingParentInfos;
+
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      // Check if this is the root of the filesystem.
+      if (keyName.length() == 0) {
+        throw new OMException("Directory create failed. Cannot create " +
+            "directory at root of the filesystem",
+            OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT);
+      }
+      // acquire lock
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      Path keyPath = Paths.get(keyName);
+
+      // Check whether any file exists along the given path; if one does, a
+      // directory cannot be created with the given key.
+      // Verify the path against the directory table.
+      OMFileRequest.OMPathInfoWithFSO omPathInfo =
+          OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName,
+              bucketName, keyName, keyPath);
+      OMFileRequest.OMDirectoryResult omDirectoryResult =
+          omPathInfo.getDirectoryResult();
+
+      if (omDirectoryResult == FILE_EXISTS ||
+          omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+        throw new OMException("Unable to create directory: " + keyName
+            + " in volume/bucket: " + volumeName + "/" + bucketName + " as " +
+                "file:" + omPathInfo.getFileExistsInPath() + " already exists",
+            FILE_ALREADY_EXISTS);
+      } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
+          omDirectoryResult == NONE) {
+
+        // prepare all missing parents
+        missingParentInfos =
+                OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(
+                        ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+
+        // prepare leafNode dir
+        OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
+                omPathInfo.getLeafNodeName(),
+                keyArgs, omPathInfo.getLeafNodeObjectId(),
+                omPathInfo.getLastKnownParentId(), trxnLogIndex,
+                OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+                Optional.of(dirInfo), Optional.of(missingParentInfos),
+                trxnLogIndex);
+
+        // total number of keys created.
+        numKeysCreated = missingParentInfos.size() + 1;
+
+        result = OMDirectoryCreateRequest.Result.SUCCESS;
+        omClientResponse =
+            new OMDirectoryCreateResponseWithFSO(omResponse.build(), dirInfo,
+                missingParentInfos, result);
+      } else {
+        result = Result.DIRECTORY_ALREADY_EXISTS;
+        omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
+        omClientResponse =
+            new OMDirectoryCreateResponseWithFSO(omResponse.build(), result);
+      }
+    } catch (IOException ex) {
+      exception = ex;
+      omClientResponse = new OMDirectoryCreateResponseWithFSO(
+          createErrorOMResponse(omResponse, exception), result);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
+        auditMap, exception, userInfo));
+
+    logResult(createDirectoryRequest, keyArgs, omMetrics, numKeysCreated,
+            result, exception);
+
+    return omClientResponse;
+  }
+
+  private void logResult(CreateDirectoryRequest createDirectoryRequest,
+                         KeyArgs keyArgs, OMMetrics omMetrics, int numKeys,
+                         Result result,
+                         IOException exception) {
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeys);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
+            volumeName, bucketName, keyName);
+      }
+      break;
+    case DIRECTORY_ALREADY_EXISTS:
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
+            volumeName, bucketName, keyName, exception);
+      }
+      break;
+    case FAILURE:
+      omMetrics.incNumCreateDirectoryFails();
+      LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
+          "Exception:{}", volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
+          createDirectoryRequest);
+    }
+  }
+
+  /**
+   * Construct OmDirectoryInfo for every missing parent directory in the
+   * path.
+   *
+   * @param ozoneManager ozone manager
+   * @param keyArgs      key arguments
+   * @param pathInfo     path info holding the missing parent directories
+   *                     and their ACLs
+   * @param trxnLogIndex transaction log index id
+   * @return list of missing parent directories
+   * @throws IOException DB failure
+   */
+  public static List<OmDirectoryInfo> getAllMissingParentDirInfo(
+          OzoneManager ozoneManager, KeyArgs keyArgs,
+          OMFileRequest.OMPathInfoWithFSO pathInfo, long trxnLogIndex)
+          throws IOException {
+    List<OmDirectoryInfo> missingParentInfos = new ArrayList<>();
+
+    // The base id is left shifted by 8 bits for creating space to
+    // create (2^8 - 1) object ids in every request.
+    // maxObjId represents the largest object id allocation possible inside
+    // the transaction.
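+    // Illustrative sketch (assuming the 8-bit shift described above):
+    // for trxnLogIndex = 5, baseObjId = 5 << 8 = 1280, so this request
+    // may allocate object ids 1280 .. 1280 + (2^8 - 1) = 1535.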
+    long baseObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex);
+    long maxObjId = baseObjId + getMaxNumOfRecursiveDirs();
+    long objectCount = 1;
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    long lastKnownParentId = pathInfo.getLastKnownParentId();
+    List<String> missingParents = pathInfo.getMissingParents();
+    List<OzoneAcl> inheritAcls = pathInfo.getAcls();
+    for (String missingKey : missingParents) {
+      long nextObjId = baseObjId + objectCount;
+      if (nextObjId > maxObjId) {
+        throw new OMException("Too many directories in path. Exceeds limit of "
+            + getMaxNumOfRecursiveDirs() + ". Unable to create directory: "
+            + keyName + " in volume/bucket: " + volumeName + "/" + bucketName,
+            INVALID_KEY_NAME);
+      }
+
+      LOG.debug("missing parent {} getting added to DirectoryTable",
+              missingKey);
+      OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey,
+              keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, inheritAcls);
+      objectCount++;
+
+      missingParentInfos.add(dirInfo);
+
+      // updating id for the next sub-dir
+      lastKnownParentId = nextObjId;
+    }
+    pathInfo.setLastKnownParentId(lastKnownParentId);
+    pathInfo.setLeafNodeObjectId(baseObjId + objectCount);
+    return missingParentInfos;
+  }
+
+  /**
+   * Fill in an OmDirectoryInfo for a new directory entry in the OM
+   * database, without initializing ACLs from the KeyArgs - used for
+   * intermediate directories which get created internally/recursively
+   * during file and directory create.
+   * @param dirName directory name
+   * @param keyArgs key arguments
+   * @param objectId object id of this directory
+   * @param parentObjectId object id of the parent directory
+   * @param transactionIndex transaction log index
+   * @param inheritAcls ACLs inherited from the parent
+   * @return the OmDirectoryInfo structure
+   */
+  private static OmDirectoryInfo createDirectoryInfoWithACL(
+          String dirName, KeyArgs keyArgs, long objectId,
+          long parentObjectId, long transactionIndex,
+          List<OzoneAcl> inheritAcls) {
+
+    return OmDirectoryInfo.newBuilder()
+            .setName(dirName)
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setObjectID(objectId)
+            .setUpdateID(transactionIndex)
+            .setParentObjectID(parentObjectId)
+            .setAcls(inheritAcls).build();
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index d8f4df2..ec7eb87 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -234,23 +234,10 @@
       List<OzoneAcl> inheritAcls = pathInfo.getAcls();
 
       // Check if a file or directory exists with same key name.
-      if (omDirectoryResult == FILE_EXISTS) {
-        if (!isOverWrite) {
-          throw new OMException("File " + keyName + " already exists",
-              OMException.ResultCodes.FILE_ALREADY_EXISTS);
-        }
-      } else if (omDirectoryResult == DIRECTORY_EXISTS) {
-        throw new OMException("Can not write to directory: " + keyName,
-            OMException.ResultCodes.NOT_A_FILE);
-      } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
-        throw new OMException(
-            "Can not create file: " + keyName + " as there " +
-                "is already file in the given path",
-            OMException.ResultCodes.NOT_A_FILE);
-      }
+      checkDirectoryResult(keyName, isOverWrite, omDirectoryResult);
 
       if (!isRecursive) {
-        checkAllParentsExist(ozoneManager, keyArgs, pathInfo);
+        checkAllParentsExist(keyArgs, pathInfo);
       }
 
       // do open key
@@ -355,8 +342,40 @@
     return omClientResponse;
   }
 
-  private void checkAllParentsExist(OzoneManager ozoneManager,
-      KeyArgs keyArgs,
+  /**
+   * Verify the OM directory result.
+   *
+   * @param keyName           key name
+   * @param isOverWrite       flag represents whether the file can be
+   *                          overwritten
+   * @param omDirectoryResult directory result
+   * @throws OMException if a file or directory already exists in the
+   *                     given path
+   */
+  protected void checkDirectoryResult(String keyName, boolean isOverWrite,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == FILE_EXISTS) {
+      if (!isOverWrite) {
+        throw new OMException("File " + keyName + " already exists",
+            OMException.ResultCodes.FILE_ALREADY_EXISTS);
+      }
+    } else if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+          OMException.ResultCodes.NOT_A_FILE);
+    } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+      throw new OMException(
+          "Can not create file: " + keyName + " as there " +
+              "is already file in the given path",
+          OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
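+
+  // A sketch of the outcomes implemented above, for quick reference:
+  //   FILE_EXISTS              -> FILE_ALREADY_EXISTS unless isOverWrite
+  //   DIRECTORY_EXISTS         -> NOT_A_FILE
+  //   FILE_EXISTS_IN_GIVENPATH -> NOT_A_FILE
+  //   any other result         -> no exception, creation may proceed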
+
+  /**
+   * Verify the existence of all parent directories.
+   *
+   * @param keyArgs  key arguments
+   * @param pathInfo om path info
+   * @throws IOException if a parent directory is not found
+   */
+  protected void checkAllParentsExist(KeyArgs keyArgs,
       OMFileRequest.OMPathInfo pathInfo) throws IOException {
     String keyName = keyArgs.getKeyName();
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
new file mode 100644
index 0000000..d792222
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles create file requests - prefix layout.
+ */
+public class OMFileCreateRequestWithFSO extends OMFileCreateRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMFileCreateRequestWithFSO.class);
+  public OMFileCreateRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
+    KeyArgs keyArgs = createFileRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    // if isRecursive is true, the file will be created even if the parent
+    // directories do not exist.
+    boolean isRecursive = createFileRequest.getIsRecursive();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
+          + keyName + ":" + isRecursive);
+    }
+
+    // if isOverWrite is true, an existing file will be overwritten.
+    boolean isOverWrite = createFileRequest.getIsOverwrite();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumCreateFile();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock = false;
+
+    OmBucketInfo omBucketInfo = null;
+    final List<OmKeyLocationInfo> locations = new ArrayList<>();
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
+
+    OMClientResponse omClientResponse = null;
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    IOException exception = null;
+    Result result = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      if (keyName.length() == 0) {
+        // Check if this is the root of the filesystem.
+        throw new OMException("Can not write to directory: " + keyName,
+                OMException.ResultCodes.NOT_A_FILE);
+      }
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      // acquire lock
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OmKeyInfo dbFileInfo = null;
+
+      OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      if (pathInfoFSO.getDirectoryResult()
+              == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
+        String dbFileKey = omMetadataManager.getOzonePathKey(
+                pathInfoFSO.getLastKnownParentId(),
+                pathInfoFSO.getLeafNodeName());
+        dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataManager, dbFileKey, keyName);
+        if (dbFileInfo != null) {
+          ozoneManager.getKeyManager().refresh(dbFileInfo);
+        }
+      }
+
+      // check if the file or directory already exists in OM
+      checkDirectoryResult(keyName, isOverWrite,
+              pathInfoFSO.getDirectoryResult());
+
+      if (!isRecursive) {
+        checkAllParentsExist(keyArgs, pathInfoFSO);
+      }
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoFSO, trxnLogIndex);
+
+      // total number of keys created.
+      numKeysCreated = missingParentInfos.size();
+
+      // do open key
+      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+          omMetadataManager.getBucketKey(volumeName, bucketName));
+
+      OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
+              dbFileInfo, keyArgs.getDataSize(), locations,
+              getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(),
+              bucketInfo, pathInfoFSO, trxnLogIndex,
+              pathInfoFSO.getLeafNodeObjectId(),
+              ozoneManager.isRatisEnabled());
+
+      long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
+      long clientID = createFileRequest.getClientID();
+      String dbOpenFileName = omMetadataManager
+          .getOpenFileName(pathInfoFSO.getLastKnownParentId(),
+              pathInfoFSO.getLeafNodeName(), clientID);
+
+      // Append new blocks
+      List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+          .stream().map(OmKeyLocationInfo::getFromProtobuf)
+          .collect(Collectors.toList());
+      omFileInfo.appendNewBlocks(newLocationList, false);
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * omFileInfo.getReplicationConfig().getRequiredNodes();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      checkBucketQuotaInNamespace(omBucketInfo, 1L);
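+      // For example (illustrative numbers only): one new block with a
+      // 256MB SCM block size and replication factor 3 pre-allocates
+      // 1 * 256MB * 3 = 768MB against the bucket space quota.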
+
+      // Adding the cache entry for this open key can be done outside the
+      // lock. Even if the bucket gets deleted in the meantime, the deletion
+      // will be detected at key commit time.
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(),
+              trxnLogIndex);
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              trxnLogIndex);
+
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+      // Update namespace quota
+      omBucketInfo.incrUsedNamespace(1L);
+
+      // Prepare response. Sets the user-given full key name in the
+      // 'keyName' attribute of the response object.
+      int clientVersion = getOmRequest().getVersion();
+      omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
+          .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
+          .setID(clientID)
+          .setOpenVersion(openVersion).build())
+          .setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponseWithFSO(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID,
+              omBucketInfo.copyObject());
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumCreateFileFails();
+      omResponse.setCmdType(Type.CreateFile);
+      omClientResponse = new OMFileCreateResponseWithFSO(createErrorOMResponse(
+            omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Audit Log outside the lock
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+        OMAction.CREATE_FILE, auditMap, exception,
+        getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.incNumKeys(numKeysCreated);
+      LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
+          createFileRequest);
+    }
+
+    return omClientResponse;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index f020f12..44c1ae4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -20,20 +20,43 @@
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 
 /**
  * Base class for file requests.
@@ -127,6 +150,169 @@
   }
 
   /**
+   * Verify whether any directory or key exists in the given path in the
+   * specified volume/bucket by iterating through the directory table.
+   *
+   * @param omMetadataManager OM Metadata manager
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param keyName           key name
+   * @param keyPath           path
+   * @return OMPathInfoWithFSO path info object
+   * @throws IOException on DB failure
+   */
+  public static OMPathInfoWithFSO verifyDirectoryKeysInPath(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull String volumeName,
+          @Nonnull String bucketName, @Nonnull String keyName,
+          @Nonnull Path keyPath) throws IOException {
+
+    String leafNodeName = OzoneFSUtils.getFileName(keyName);
+    List<String> missing = new ArrayList<>();
+
+    // Found no files/directories in the given path.
+    OMDirectoryResult result = OMDirectoryResult.NONE;
+
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    // by default, inherit bucket ACLs
+    List<OzoneAcl> inheritAcls = omBucketInfo.getAcls();
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    String dbDirName = ""; // absolute path for trace logs
+    // for better logging
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      if (missing.size() > 0) {
+        // Add all the sub-dirs to the missing list except the leaf element.
+        // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
+        // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
+        if (elements.hasNext()) {
+          // skips leaf node.
+          missing.add(fileName);
+        }
+        continue;
+      }
+
+      // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt
+      // 1. Look up the directoryTable. If no entry exists, go to the next
+      //    step.
+      // 2. Look up the keyTable. If no entry exists, go to the next step.
+      // 3. Add the 'sub-dir' to the missing parents list.
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+      if (omDirInfo != null) {
+        dbDirName += omDirInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER;
+        if (elements.hasNext()) {
+          result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
+          lastKnownParentId = omDirInfo.getObjectID();
+          inheritAcls = omDirInfo.getAcls();
+          continue;
+        } else {
+          // Checked all the sub-dirs till the leaf node.
+          // Found a directory in the given path.
+          result = OMDirectoryResult.DIRECTORY_EXISTS;
+        }
+      } else {
+        // Get parentID from the lastKnownParent. For any file directly
+        // under the bucket, the parent is the bucketID. Say,
+        // "/vol1/buck1/file1".
+        // TODO: Need to add UT for this case along with OMFileCreateRequest.
+        if (omMetadataManager.getKeyTable().isExist(dbNodeName)) {
+          if (elements.hasNext()) {
+            // Found a file in the given key name.
+            result = OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+          } else {
+            // Checked all the sub-dirs till the leaf file.
+            // Found a file with the given key name.
+            result = OMDirectoryResult.FILE_EXISTS;
+          }
+          break; // Skip directory traversal as it hits key.
+        }
+
+        // Add to the missing list; there is no such file/directory with
+        // the given name.
+        if (elements.hasNext()) {
+          missing.add(fileName);
+        }
+      }
+    }
+
+    LOG.trace("verifyFiles/Directories in Path : " + "/" + volumeName
+            + "/" + bucketName + "/" + keyName + ":" + result);
+
+    if (result == OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH || result ==
+            OMDirectoryResult.FILE_EXISTS) {
+      return new OMPathInfoWithFSO(leafNodeName, lastKnownParentId, missing,
+              result, inheritAcls, fullKeyPath.toString());
+    }
+
+    String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
+            bucketName, dbDirName);
+    LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : "
+            + inheritAcls);
+
+    return new OMPathInfoWithFSO(leafNodeName, lastKnownParentId, missing,
+            result, inheritAcls);
+  }
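+
+  // Worked example (a sketch; names are hypothetical): for key
+  // "a/b/c/file1" where only directories "a" and "b" exist in the
+  // directory table, the walk above returns
+  // DIRECTORY_EXISTS_IN_GIVENPATH with missing parents = [c],
+  // leafNodeName = "file1" and lastKnownParentId = objectID of "b".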
+
+  /**
+   * Class to return the results from verifyDirectoryKeysInPath.
+   * Includes the list of missing intermediate directories and
+   * the directory search result code.
+   */
+  public static class OMPathInfoWithFSO extends OMPathInfo {
+    private String leafNodeName;
+    private long lastKnownParentId;
+    private long leafNodeObjectId;
+    private String fileExistsInPath;
+
+    public OMPathInfoWithFSO(String leafNodeName, long lastKnownParentId,
+                        List missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList, String fileExistsInPath) {
+      super(missingParents, result, aclList);
+      this.leafNodeName = leafNodeName;
+      this.lastKnownParentId = lastKnownParentId;
+      this.fileExistsInPath = fileExistsInPath;
+    }
+
+    public OMPathInfoWithFSO(String leafNodeName, long lastKnownParentId,
+                        List missingParents, OMDirectoryResult result,
+                        List<OzoneAcl> aclList) {
+      this(leafNodeName, lastKnownParentId, missingParents, result, aclList,
+              "");
+    }
+
+    public String getLeafNodeName() {
+      return leafNodeName;
+    }
+
+    public long getLeafNodeObjectId() {
+      return leafNodeObjectId;
+    }
+
+    public void setLeafNodeObjectId(long leafNodeObjectId) {
+      this.leafNodeObjectId = leafNodeObjectId;
+    }
+
+    public void setLastKnownParentId(long lastKnownParentId) {
+      this.lastKnownParentId = lastKnownParentId;
+    }
+
+    public long getLastKnownParentId() {
+      return lastKnownParentId;
+    }
+
+    public String getFileExistsInPath() {
+      return fileExistsInPath;
+    }
+  }
+
+  /**
    * Class to return the results from verifyFilesInPath.
    * Includes the list of missing intermediate directories and
    * the directory search result code.
@@ -224,4 +410,563 @@
           new CacheValue<>(keyInfo, index));
     }
   }
+
+  /**
+   * Adding directory info to the Table cache.
+   *
+   * @param omMetadataManager  OM Metadata Manager
+   * @param dirInfo            directory info
+   * @param missingParentInfos list of the parents to be added to DB
+   * @param trxnLogIndex       transaction log index
+   */
+  public static void addDirectoryTableCacheEntries(
+          OMMetadataManager omMetadataManager,
+          Optional<OmDirectoryInfo> dirInfo,
+          Optional<List<OmDirectoryInfo>> missingParentInfos,
+          long trxnLogIndex) {
+    for (OmDirectoryInfo subDirInfo : missingParentInfos.get()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      subDirInfo.getParentObjectID(), subDirInfo.getName())),
+              new CacheValue<>(Optional.of(subDirInfo), trxnLogIndex));
+    }
+
+    if (dirInfo.isPresent()) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(omMetadataManager.getOzonePathKey(
+                      dirInfo.get().getParentObjectID(),
+                      dirInfo.get().getName())),
+              new CacheValue<>(dirInfo, trxnLogIndex));
+    }
+  }
+
+  /**
+   * Adding Key info to the openFile Table cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbOpenFileName    open file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   */
+  public static void addOpenFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbOpenFileName,
+          @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    Optional<OmKeyInfo> keyInfoOptional = Optional.absent();
+    if (omFileInfo != null) {
+      // New key format for the openFileTable.
+      // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+      // keyName field stores only the leaf node name, which is 'file1'.
+      omFileInfo.setKeyName(fileName);
+      omFileInfo.setFileName(fileName);
+      keyInfoOptional = Optional.of(omFileInfo);
+    }
+
+    omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(dbOpenFileName),
+            new CacheValue<>(keyInfoOptional, trxnLogIndex));
+  }
+
+  /**
+   * Adding Key info to the file table cache.
+   *
+   * @param omMetadataManager OM Metadata Manager
+   * @param dbFileKey         file name key
+   * @param omFileInfo        key info
+   * @param fileName          file name
+   * @param trxnLogIndex      transaction log index
+   */
+  public static void addFileTableCacheEntry(
+          OMMetadataManager omMetadataManager, String dbFileKey,
+          OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+    // New key format for the fileTable.
+    // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    omFileInfo.setKeyName(fileName);
+    omFileInfo.setFileName(fileName);
+
+    omMetadataManager.getKeyTable().addCacheEntry(
+            new CacheKey<>(dbFileKey),
+            new CacheValue<>(Optional.of(omFileInfo), trxnLogIndex));
+  }
+
+  /**
+   * Adding omKeyInfo to open file table.
+   *
+   * @param omMetadataMgr    OM Metadata Manager
+   * @param batchOp          batch of db operations
+   * @param omFileInfo       omKeyInfo
+   * @param openKeySessionID clientID
+   * @throws IOException DB failure
+   */
+  public static void addToOpenFileTable(OMMetadataManager omMetadataMgr,
+                                        BatchOperation batchOp,
+                                        OmKeyInfo omFileInfo,
+                                        long openKeySessionID)
+          throws IOException {
+
+    String dbOpenFileKey = omMetadataMgr.getOpenFileName(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            openKeySessionID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, dbOpenFileKey,
+            omFileInfo);
+  }
+
+  /**
+   * Adding multipart omKeyInfo to open file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @param uploadID      uploadID
+   * @return multipartFileKey
+   * @throws IOException DB failure
+   */
+  public static String addToOpenFileTable(OMMetadataManager omMetadataMgr,
+      BatchOperation batchOp, OmKeyInfo omFileInfo, String uploadID)
+          throws IOException {
+
+    String multipartFileKey = omMetadataMgr.getMultipartKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+            uploadID);
+
+    omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, multipartFileKey,
+            omFileInfo);
+
+    return multipartFileKey;
+  }
+
+  /**
+   * Adding omKeyInfo to the file table.
+   *
+   * @param omMetadataMgr OM Metadata Manager
+   * @param batchOp       batch of db operations
+   * @param omFileInfo    omKeyInfo
+   * @return db file key
+   * @throws IOException DB failure
+   */
+  public static String addToFileTable(OMMetadataManager omMetadataMgr,
+                                    BatchOperation batchOp,
+                                    OmKeyInfo omFileInfo)
+          throws IOException {
+
+    String dbFileKey = omMetadataMgr.getOzonePathKey(
+            omFileInfo.getParentObjectID(), omFileInfo.getFileName());
+
+    omMetadataMgr.getKeyTable().putWithBatch(batchOp,
+            dbFileKey, omFileInfo);
+    return dbFileKey;
+  }
+
+  /**
+   * Gets the om key info from the open key table if the openFileTable flag
+   * is true, otherwise gets it from the key table.
+   *
+   * @param openFileTable if true, read the KeyInfo from the openFileTable,
+   *                      otherwise from the fileTable
+   * @param omMetadataMgr OM Metadata Manager
+   * @param dbOpenFileKey open file key name in DB
+   * @param keyName       key name
+   * @return om key info
+   * @throws IOException DB failure
+   */
+  public static OmKeyInfo getOmKeyInfoFromFileTable(boolean openFileTable,
+      OMMetadataManager omMetadataMgr, String dbOpenFileKey, String keyName)
+          throws IOException {
+
+    OmKeyInfo dbOmKeyInfo;
+    if (openFileTable) {
+      dbOmKeyInfo = omMetadataMgr.getOpenKeyTable().get(dbOpenFileKey);
+    } else {
+      dbOmKeyInfo = omMetadataMgr.getKeyTable().get(dbOpenFileKey);
+    }
+
+    // In DB, the OMKeyInfo stores only the fileName in the keyName field.
+    // This sets the user-given keyName back into the OmKeyInfo object.
+    // For example, if the user-given key path is '/a/b/c/d/e/file1', the DB
+    // keyName field stores only the leaf node name, which is 'file1'.
+    if (dbOmKeyInfo != null) {
+      dbOmKeyInfo.setKeyName(keyName);
+    }
+    return dbOmKeyInfo;
+  }
+
+  /**
+   * Gets OmKeyInfo if exists for the given key name in the DB.
+   *
+   * @param omMetadataMgr metadata manager
+   * @param volumeName    volume name
+   * @param bucketName    bucket name
+   * @param keyName       key name
+   * @param scmBlockSize  scm block size
+   * @return OzoneFileStatus
+   * @throws IOException DB failure
+   */
+  @Nullable
+  public static OzoneFileStatus getOMKeyInfoIfExists(
+      OMMetadataManager omMetadataMgr, String volumeName, String bucketName,
+      String keyName, long scmBlockSize) throws IOException {
+
+    OMFileRequest.validateBucket(omMetadataMgr, volumeName, bucketName);
+
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataMgr.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataMgr.getBucketTable().get(bucketKey);
+
+    long lastKnownParentId = omBucketInfo.getObjectID();
+    OmDirectoryInfo omDirInfo = null;
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+
+      // For example, /vol1/buck1/a/b/c/d/e/file1.txt
+      // 1. Do lookup path component on directoryTable starting from bucket
+      // 'buck1' to the leaf node component, which is 'file1.txt'.
+      // 2. If there is no dir exists for the leaf node component 'file1.txt'
+      // then do look it on fileTable.
+      String dbNodeName = omMetadataMgr.getOzonePathKey(
+              lastKnownParentId, fileName);
+      omDirInfo = omMetadataMgr.getDirectoryTable().get(dbNodeName);
+
+      if (omDirInfo != null) {
+        lastKnownParentId = omDirInfo.getObjectID();
+      } else if (!elements.hasNext()) {
+        // reached last path component. Check file exists for the given path.
+        OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataMgr, dbNodeName, keyName);
+        if (omKeyInfo != null) {
+          return new OzoneFileStatus(omKeyInfo, scmBlockSize, false);
+        }
+      } else {
+        // Missing intermediate directory; the key is not found in DB,
+        // so just return null.
+        return null;
+      }
+    }
+
+    if (omDirInfo != null) {
+      OmKeyInfo omKeyInfo = getOmKeyInfo(volumeName, bucketName, omDirInfo,
+              keyName);
+      return new OzoneFileStatus(omKeyInfo, scmBlockSize, true);
+    }
+
+    // key not found in DB
+    return null;
+  }
+
+  /**
+   * Prepare OmKeyInfo from OmDirectoryInfo.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param dirInfo    directory info
+   * @param keyName    user given key name
+   * @return OmKeyInfo object
+   */
+  @NotNull
+  public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+      OmDirectoryInfo dirInfo, String keyName) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setParentObjectID(dirInfo.getParentObjectID());
+    builder.setKeyName(keyName);
+    builder.setAcls(dirInfo.getAcls());
+    builder.addAllMetadata(dirInfo.getMetadata());
+    builder.setVolumeName(volumeName);
+    builder.setBucketName(bucketName);
+    builder.setCreationTime(dirInfo.getCreationTime());
+    builder.setModificationTime(dirInfo.getModificationTime());
+    builder.setObjectID(dirInfo.getObjectID());
+    builder.setUpdateID(dirInfo.getUpdateID());
+    builder.setFileName(dirInfo.getName());
+    builder.setReplicationConfig(new RatisReplicationConfig(
+            HddsProtos.ReplicationFactor.ONE));
+    builder.setOmKeyLocationInfos(Collections.singletonList(
+            new OmKeyLocationInfoGroup(0, new ArrayList<>())));
+    return builder.build();
+  }
+
+  /**
+   * Returns absolute path.
+   *
+   * @param prefixName prefix path
+   * @param fileName   file name
+   * @return absolute path
+   */
+  @NotNull
+  public static String getAbsolutePath(String prefixName, String fileName) {
+    if (Strings.isNullOrEmpty(prefixName)) {
+      return fileName;
+    }
+    prefixName = OzoneFSUtils.addTrailingSlashIfNeeded(prefixName);
+    return prefixName.concat(fileName);
+  }
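+
+  // E.g. (a sketch): getAbsolutePath("a/b", "file1") returns "a/b/file1",
+  // while getAbsolutePath("", "file1") returns just "file1".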
+
+  /**
+   * Build DirectoryInfo from OmKeyInfo.
+   *
+   * @param keyInfo omKeyInfo
+   * @return omDirectoryInfo object
+   */
+  public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo) {
+    OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder();
+    builder.setParentObjectID(keyInfo.getParentObjectID());
+    builder.setAcls(keyInfo.getAcls());
+    builder.addAllMetadata(keyInfo.getMetadata());
+    builder.setCreationTime(keyInfo.getCreationTime());
+    builder.setModificationTime(keyInfo.getModificationTime());
+    builder.setObjectID(keyInfo.getObjectID());
+    builder.setUpdateID(keyInfo.getUpdateID());
+    builder.setName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
+    return builder.build();
+  }
+
+  /**
+   * Verify that the given toKey directory is not a sub-directory of the
+   * fromKey directory.
+   * <p>
+   * For example, renaming a directory to its own sub-directory is a
+   * special case that is not allowed.
+   *
+   * @param fromKeyName source path
+   * @param toKeyName   destination path
+   * @param isDir       true represents a directory type, otherwise a file
+   *                    type
+   * @throws OMException if the destination dir is a sub-dir of the source
+   *                     dir.
+   */
+  public static void verifyToDirIsASubDirOfFromDirectory(String fromKeyName,
+      String toKeyName, boolean isDir) throws OMException {
+    if (!isDir) {
+      return;
+    }
+    Path dstParent = Paths.get(toKeyName).getParent();
+    while (dstParent != null) {
+      if (Paths.get(fromKeyName).equals(dstParent)) {
+        throw new OMException("Cannot rename a directory to its own " +
+                "subdirectory", OMException.ResultCodes.KEY_RENAME_ERROR);
+        // TODO: Existing rename throws java.lang.IllegalArgumentException.
+        //       Should we throw same exception ?
+      }
+      dstParent = dstParent.getParent();
+    }
+  }
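+
+  // For example (hypothetical paths): renaming directory "a/b" to
+  // "a/b/c" walks the destination's ancestors ("a/b", then "a"), matches
+  // the source "a/b" and throws KEY_RENAME_ERROR; renaming "a/b" to
+  // "a/d" finds no such ancestor and passes this check.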
+
+  /**
+   * Verify parent exists for the destination path and return destination
+   * path parent Id.
+   * <p>
+   * Check whether dst parent dir exists or not. If the parent exists, then the
+   * source can be renamed to dst path.
+   *
+   * @param volumeName  volume name
+   * @param bucketName  bucket name
+   * @param toKeyName   destination path
+   * @param fromKeyName source path
+   * @param metaMgr     metadata manager
+   * @throws IOException if the destination parent dir doesn't exist.
+   */
+  public static long getToKeyNameParentId(String volumeName,
+      String bucketName, String toKeyName, String fromKeyName,
+      OMMetadataManager metaMgr) throws IOException {
+
+    int totalDirsCount = OzoneFSUtils.getFileCount(toKeyName);
+    // skip parent is root '/'
+    if (totalDirsCount <= 1) {
+      String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+      OmBucketInfo omBucketInfo =
+              metaMgr.getBucketTable().get(bucketKey);
+      return omBucketInfo.getObjectID();
+    }
+
+    String toKeyParentDir = OzoneFSUtils.getParentDir(toKeyName);
+
+    OzoneFileStatus toKeyParentDirStatus = getOMKeyInfoIfExists(metaMgr,
+            volumeName, bucketName, toKeyParentDir, 0);
+    // check if the immediate parent exists
+    if (toKeyParentDirStatus == null) {
+      throw new OMException(String.format(
+              "Failed to rename %s to %s, %s doesn't exist", fromKeyName,
+              toKeyName, toKeyParentDir),
+              OMException.ResultCodes.KEY_RENAME_ERROR);
+    } else if (toKeyParentDirStatus.isFile()) {
+      throw new OMException(String.format(
+              "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName,
+              toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR);
+    }
+    return toKeyParentDirStatus.getKeyInfo().getObjectID();
+  }
+
+  /**
+   * Check whether any sub-paths exist for the given user key path.
+   *
+   * @param omKeyInfo om key info
+   * @param metaMgr   OMMetadataManager
+   * @return true if there are any sub path, false otherwise
+   * @throws IOException DB exception
+   */
+  public static boolean hasChildren(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    return checkSubDirectoryExists(omKeyInfo, metaMgr) ||
+            checkSubFileExists(omKeyInfo, metaMgr);
+  }
+
+  private static boolean checkSubDirectoryExists(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    // Check all dirTable cache for any sub paths.
+    Table dirTable = metaMgr.getDirectoryTable();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+            cacheIter = dirTable.cacheIterator();
+
+    while (cacheIter.hasNext()) {
+      Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>> entry =
+              cacheIter.next();
+      OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue();
+      if (cacheOmDirInfo == null) {
+        continue;
+      }
+      if (isImmediateChild(cacheOmDirInfo.getParentObjectID(),
+              omKeyInfo.getObjectID())) {
+        return true; // found a sub path directory
+      }
+    }
+
+    // Check dirTable entries for any sub paths.
+    String seekDirInDB = metaMgr.getOzonePathKey(omKeyInfo.getObjectID(), "");
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            iterator = dirTable.iterator();
+
+    iterator.seek(seekDirInDB);
+
+    if (iterator.hasNext()) {
+      OmDirectoryInfo dirInfo = iterator.value().getValue();
+      return isImmediateChild(dirInfo.getParentObjectID(),
+              omKeyInfo.getObjectID());
+    }
+    return false; // no sub paths found
+  }
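+
+  // Sketch of the seek trick above (ids and key format are hypothetical):
+  // for a directory with objectID 1025, seeking the dirTable to prefix
+  // "1025/" positions the iterator at the first "1025/<name>" entry, if
+  // any; its parentObjectID equals 1025 exactly when a child exists.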
+
+  private static boolean checkSubFileExists(OmKeyInfo omKeyInfo,
+      OMMetadataManager metaMgr) throws IOException {
+    // Check all fileTable cache for any sub paths.
+    Table fileTable = metaMgr.getKeyTable();
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+            cacheIter = fileTable.cacheIterator();
+
+    while (cacheIter.hasNext()) {
+      Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+              cacheIter.next();
+      OmKeyInfo cacheOmFileInfo = entry.getValue().getCacheValue();
+      if (cacheOmFileInfo == null) {
+        continue;
+      }
+      if (isImmediateChild(cacheOmFileInfo.getParentObjectID(),
+              omKeyInfo.getObjectID())) {
+        return true; // found a sub path file
+      }
+    }
+
+    // Check fileTable entries for any sub paths.
+    String seekFileInDB = metaMgr.getOzonePathKey(
+            omKeyInfo.getObjectID(), "");
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            iterator = fileTable.iterator();
+
+    iterator.seek(seekFileInDB);
+
+    if (iterator.hasNext()) {
+      OmKeyInfo fileInfo = iterator.value().getValue();
+      return isImmediateChild(fileInfo.getParentObjectID(),
+              omKeyInfo.getObjectID()); // found a sub path file
+    }
+    return false; // no sub paths found
+  }
+
+  public static boolean isImmediateChild(long parentId, long ancestorId) {
+    return parentId == ancestorId;
+  }
+
+  /**
+   * Get parent id for the user given path.
+   *
+   * @param bucketId       bucket id
+   * @param pathComponents file path elements
+   * @param keyName        user given key name
+   * @param omMetadataManager   om metadata manager
+   * @return lastKnownParentID
+   * @throws IOException DB failure or parent doesn't exist in DirectoryTable
+   */
+  public static long getParentID(long bucketId, Iterator<Path> pathComponents,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+
+    long lastKnownParentId = bucketId;
+
+    // If no sub-dirs then bucketID is the root/parent.
+    if (!pathComponents.hasNext()) {
+      return bucketId;
+    }
+
+    OmDirectoryInfo omDirectoryInfo;
+    while (pathComponents.hasNext()) {
+      String nodeName = pathComponents.next().toString();
+      boolean reachedLastPathComponent = !pathComponents.hasNext();
+      String dbNodeName =
+              omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+      omDirectoryInfo = omMetadataManager.
+              getDirectoryTable().get(dbNodeName);
+      if (omDirectoryInfo != null) {
+        if (reachedLastPathComponent) {
+          throw new OMException("Can not create file: " + keyName +
+                  " as there is already directory in the given path",
+                  NOT_A_FILE);
+        }
+        lastKnownParentId = omDirectoryInfo.getObjectID();
+      } else {
+        // One of the sub-directories doesn't exist in DB. The immediate
+        // parent must exist to commit the key; otherwise the operation
+        // will fail.
+        if (!reachedLastPathComponent) {
+          throw new OMException("Failed to find parent directory of "
+                  + keyName + " in DirectoryTable", KEY_NOT_FOUND);
+        }
+        break;
+      }
+    }
+
+    return lastKnownParentId;
+  }
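+
+  // Worked example (hypothetical ids): for bucketId 512 and key
+  // "a/b/file1", the walk looks up "512/a" and then "<a.objectID>/b" in
+  // the DirectoryTable and returns b's objectID; if "b" were missing,
+  // KEY_NOT_FOUND would be thrown, and if "file1" itself resolved to a
+  // directory, NOT_A_FILE would be thrown.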
+
+  /**
+   * Validates volume and bucket existence.
+   *
+   * @param metadataManager OM metadata manager
+   * @param volumeName      volume name
+   * @param bucketName      bucket name
+   * @throws IOException if the volume or bucket doesn't exist
+   */
+  public static void validateBucket(OMMetadataManager metadataManager,
+      String volumeName, String bucketName)
+      throws IOException {
+
+    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+    // Check if bucket exists
+    if (metadataManager.getBucketTable().get(bucketKey) == null) {
+      String volumeKey = metadataManager.getVolumeKey(volumeName);
+      // If the volume also does not exist, we should throw volume not found
+      // exception
+      if (metadataManager.getVolumeTable().get(volumeKey) == null) {
+        LOG.error("volume not found: {}", volumeName);
+        throw new OMException("Volume not found",
+            VOLUME_NOT_FOUND);
+      }
+
+      // if the volume exists but bucket does not exist, throw bucket not found
+      // exception
+      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
+      throw new OMException("Bucket not found",
+          BUCKET_NOT_FOUND);
+    }
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
new file mode 100644
index 0000000..98184f9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles allocate block request - prefix layout.
+ */
+public class OMAllocateBlockRequestWithFSO extends OMAllocateBlockRequest {
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(OMAllocateBlockRequestWithFSO.class);
+
+  public OMAllocateBlockRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    AllocateBlockRequest allocateBlockRequest =
+            getOmRequest().getAllocateBlockRequest();
+
+    KeyArgs keyArgs =
+            allocateBlockRequest.getKeyArgs();
+
+    OzoneManagerProtocolProtos.KeyLocation blockLocation =
+            allocateBlockRequest.getKeyLocation();
+    Preconditions.checkNotNull(blockLocation);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    long clientID = allocateBlockRequest.getClientID();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumBlockAllocateCalls();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String openKeyName = null;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+    OMClientResponse omClientResponse = null;
+
+    OmKeyInfo openKeyInfo = null;
+    IOException exception = null;
+    OmBucketInfo omBucketInfo = null;
+    boolean acquiredLock = false;
+
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.WRITE, allocateBlockRequest.getClientID());
+
+      validateBucketAndVolume(omMetadataManager, volumeName,
+          bucketName);
+
+      // Here we don't acquire the bucket/volume lock because, for a single
+      // client, allocateBlock is called serially. The trade-off is that we
+      // don't fail fast if a concurrent delete/rename races with this
+      // request; the assumption is that the later key commit operation will
+      // fail instead.
+      openKeyName = getOpenKeyName(volumeName, bucketName, keyName, clientID,
+              ozoneManager);
+      openKeyInfo = getOpenKeyInfo(omMetadataManager, openKeyName, keyName);
+      if (openKeyInfo == null) {
+        throw new OMException("Open Key not found " + openKeyName,
+                KEY_NOT_FOUND);
+      }
+
+      List<OmKeyLocationInfo> newLocationList = Collections.singletonList(
+              OmKeyLocationInfo.getFromProtobuf(blockLocation));
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * openKeyInfo.getReplicationConfig().getRequiredNodes();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      // Append new block
+      openKeyInfo.appendNewBlocks(newLocationList, false);
+
+      // Set modification time.
+      openKeyInfo.setModificationTime(keyArgs.getModificationTime());
+
+      // Set the UpdateID to current transactionLogIndex
+      openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      // Add to cache.
+      addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName,
+              openKeyInfo);
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+
+      omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
+              .setKeyLocation(blockLocation).build());
+      omClientResponse = getOmClientResponse(clientID, omResponse,
+              openKeyInfo, omBucketInfo.copyObject());
+      LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
+              volumeName, bucketName, openKeyName);
+    } catch (IOException ex) {
+      omMetrics.incNumBlockAllocateCallFails();
+      exception = ex;
+      omClientResponse = new OMAllocateBlockResponseWithFSO(
+          createErrorOMResponse(omResponse, exception));
+      LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " +
+              "Exception:{}", volumeName, bucketName, openKeyName, exception);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    return omClientResponse;
+  }
+
+  private OmKeyInfo getOpenKeyInfo(OMMetadataManager omMetadataManager,
+      String openKeyName, String keyName) throws IOException {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, openKeyName, fileName);
+  }
+
+  private String getOpenKeyName(String volumeName, String bucketName,
+      String keyName, long clientID, OzoneManager ozoneManager)
+          throws IOException {
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+            keyName, omMetadataManager);
+    return omMetadataManager.getOpenFileName(parentID, fileName,
+            clientID);
+  }
+
+  private void addOpenTableCacheEntry(long trxnLogIndex,
+      OMMetadataManager omMetadataManager, String openKeyName,
+      OmKeyInfo openKeyInfo) {
+    String fileName = openKeyInfo.getFileName();
+    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, openKeyName,
+            openKeyInfo, fileName, trxnLogIndex);
+  }
+
+  @NotNull
+  private OMClientResponse getOmClientResponse(long clientID,
+      OMResponse.Builder omResponse, OmKeyInfo openKeyInfo,
+      OmBucketInfo omBucketInfo) {
+    return new OMAllocateBlockResponseWithFSO(omResponse.build(), openKeyInfo,
+            clientID, omBucketInfo);
+  }
+}
\ No newline at end of file
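Reviewer note: the quota pre-charge above is `newLocationList.size() * scmBlockSize * requiredNodes`, i.e. every freshly allocated block reserves a full SCM block on each replica. A minimal sketch of that arithmetic, with all class and method names invented for illustration:

```java
/**
 * Illustrative sketch (not part of this patch) of the quota pre-charge in
 * OMAllocateBlockRequestWithFSO. Only the arithmetic mirrors the change.
 */
public final class QuotaPreChargeSketch {

  /** Bytes charged up-front against the bucket for new blocks. */
  static long preAllocatedSpace(int numNewBlocks, long scmBlockSize,
      int requiredNodes) {
    // Each block reserves a full SCM block on every replica.
    return (long) numNewBlocks * scmBlockSize * requiredNodes;
  }

  public static void main(String[] args) {
    // One new 256 MB block, replication factor THREE (3 required nodes):
    long charged = preAllocatedSpace(1, 256L * 1024 * 1024, 3);
    System.out.println(charged); // 805306368 bytes (768 MB) reserved
  }
}
```

The pre-charge is deliberately pessimistic; it is corrected at commit time (see OMKeyCommitRequestWithFSO below).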
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 73d17c3..48fb7e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -233,6 +233,30 @@
     auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
           exception, getOmRequest().getUserInfo()));
 
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Process result of om request execution.
+   *
+   * @param commitKeyRequest commit key request
+   * @param volumeName       volume name
+   * @param bucketName       bucket name
+   * @param keyName          key name
+   * @param omMetrics        om metrics
+   * @param exception        exception trace
+   * @param omKeyInfo        omKeyInfo
+   * @param result           stores the result of the execution
+   */
+  @SuppressWarnings("parameternumber")
+  protected void processResult(CommitKeyRequest commitKeyRequest,
+                               String volumeName, String bucketName,
+                               String keyName, OMMetrics omMetrics,
+                               IOException exception, OmKeyInfo omKeyInfo,
+                               Result result) {
     switch (result) {
     case SUCCESS:
       // As when we commit the key, then it is visible in ozone, so we should
@@ -244,18 +268,16 @@
         omMetrics.incNumKeys();
       }
       LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+              bucketName, keyName);
       break;
     case FAILURE:
-      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
-          volumeName, bucketName, keyName, exception);
+      LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}",
+              volumeName, bucketName, keyName, exception);
       omMetrics.incNumKeyCommitFails();
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
-          commitKeyRequest);
+              commitKeyRequest);
     }
-
-    return omClientResponse;
   }
 }
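Reviewer note: the hunk above turns the tail of `validateAndUpdateCache` into a protected `processResult` hook so the FSO variant can inherit the metrics and logging unchanged. A tiny, hypothetical sketch of the pattern (all names here are illustrative, not the Ozone API):

```java
/** Hypothetical sketch of the protected-hook refactor used above. */
abstract class CommitHandlerSketch {

  enum Result { SUCCESS, FAILURE }

  final void commit() {
    // Shared tail: both layouts funnel through the same hook.
    processResult(doCommit());
  }

  abstract Result doCommit();

  protected void processResult(Result result) {
    System.out.println("commit finished with " + result);
  }

  public static void main(String[] args) {
    CommitHandlerSketch fso = new CommitHandlerSketch() {
      @Override
      Result doCommit() {
        return Result.SUCCESS; // layout-specific work would happen here
      }
    };
    fso.commit(); // prints: commit finished with SUCCESS
  }
}
```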
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
new file mode 100644
index 0000000..c871761
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles CommitKey request - prefix layout.
+ */
+public class OMKeyCommitRequestWithFSO extends OMKeyCommitRequest {
+
+  public OMKeyCommitRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
+
+    KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
+
+    String volumeName = commitKeyArgs.getVolumeName();
+    String bucketName = commitKeyArgs.getBucketName();
+    String keyName = commitKeyArgs.getKeyName();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyCommits();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+
+    IOException exception = null;
+    OmKeyInfo omKeyInfo = null;
+    OmBucketInfo omBucketInfo = null;
+    OMClientResponse omClientResponse = null;
+    boolean bucketLockAcquired = false;
+    Result result;
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    try {
+      commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap);
+      volumeName = commitKeyArgs.getVolumeName();
+      bucketName = commitKeyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName,
+              keyName, IAccessAuthorizer.ACLType.WRITE,
+              commitKeyRequest.getClientID());
+
+      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+      Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+      String dbOpenFileKey = null;
+
+      List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
+      for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
+        locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation));
+      }
+
+      bucketLockAcquired =
+              omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+                      volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+      long bucketId = omBucketInfo.getObjectID();
+      long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+              keyName, omMetadataManager);
+      String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+      dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
+              commitKeyRequest.getClientID());
+
+      omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+              omMetadataManager, dbOpenFileKey, keyName);
+      if (omKeyInfo == null) {
+        throw new OMException("Failed to commit key, as " + dbOpenFileKey +
+                "entry is not found in the OpenKey table", KEY_NOT_FOUND);
+      }
+      omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
+
+      omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
+
+      // Update the block length for each block
+      omKeyInfo.updateLocationInfoList(locationInfoList, false);
+
+      // Set the UpdateID to current transactionLogIndex
+      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      // Remove the entry from the open file table cache and add the
+      // committed key to the file table cache.
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey,
+              null, fileName, trxnLogIndex);
+
+      OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
+              omKeyInfo, fileName, trxnLogIndex);
+
+      long scmBlockSize = ozoneManager.getScmBlockSize();
+      int factor = omKeyInfo.getReplicationConfig().getRequiredNodes();
+      // Blocks were pre-requested and usedBytes was updated during createKey
+      // and allocateBlock. The space occupied by the key must be based on
+      // the actual key size, so the total block size reserved earlier is
+      // subtracted here.
+      long correctedSpace = omKeyInfo.getDataSize() * factor -
+              locationInfoList.size() * scmBlockSize * factor;
+      omBucketInfo.incrUsedBytes(correctedSpace);
+
+      omClientResponse = new OMKeyCommitResponseWithFSO(omResponse.build(),
+              omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject());
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyCommitResponseWithFSO(createErrorOMResponse(
+              omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+
+      if (bucketLockAcquired) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+            exception, omKeyInfo, result);
+
+    return omClientResponse;
+  }
+}
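Reviewer note: the `correctedSpace` computed above refunds the pessimistic pre-charge once the real key size is known; the delta is usually negative. A worked sketch of the arithmetic (names invented for illustration):

```java
/**
 * Illustrative sketch (not part of this patch) of the commit-time quota
 * correction in OMKeyCommitRequestWithFSO.
 */
public final class QuotaCommitCorrectionSketch {

  /** Delta applied to bucket usedBytes when the key is committed. */
  static long correctedSpace(long dataSize, int numBlocks,
      long scmBlockSize, int factor) {
    // Charge the real key size and refund the blocks reserved earlier.
    return dataSize * factor - (long) numBlocks * scmBlockSize * factor;
  }

  public static void main(String[] args) {
    long scmBlockSize = 256L * 1024 * 1024;
    // A 10 MB key written into one pre-reserved block, factor THREE:
    long delta = correctedSpace(10L * 1024 * 1024, 1, scmBlockSize, 3);
    // Negative delta: the unused part of the reservation is released.
    System.out.println(delta); // -773849088
  }
}
```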
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index d974a66..06c104b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -349,24 +349,34 @@
         OMAction.ALLOCATE_KEY, auditMap, exception,
         getOmRequest().getUserInfo()));
 
+    logResult(createKeyRequest, omMetrics, exception, result,
+            numMissingParents);
+
+    return omClientResponse;
+  }
+
+  protected void logResult(CreateKeyRequest createKeyRequest,
+      OMMetrics omMetrics, IOException exception, Result result,
+      int numMissingParents) {
     switch (result) {
     case SUCCESS:
       // Missing directories are created immediately, counting that here.
       // The metric for the key is incremented as part of the key commit.
       omMetrics.incNumKeys(numMissingParents);
-      LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}", volumeName,
-          bucketName, keyName);
+      LOG.debug("Key created. Volume:{}, Bucket:{}, Key:{}",
+              createKeyRequest.getKeyArgs().getVolumeName(),
+              createKeyRequest.getKeyArgs().getBucketName(),
+              createKeyRequest.getKeyArgs().getKeyName());
       break;
     case FAILURE:
       LOG.error("Key creation failed. Volume:{}, Bucket:{}, Key{}. " +
-          "Exception:{}", volumeName, bucketName, keyName, exception);
+          "Exception:{}", createKeyRequest.getKeyArgs().getVolumeName(),
+              createKeyRequest.getKeyArgs().getBucketName(),
+              createKeyRequest.getKeyArgs().getKeyName(), exception);
       break;
     default:
       LOG.error("Unrecognized Result for OMKeyCreateRequest: {}",
           createKeyRequest);
     }
-
-    return omClientResponse;
   }
-
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
new file mode 100644
index 0000000..654c251
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+
+/**
+ * Handles CreateKey request - prefix layout.
+ */
+public class OMKeyCreateRequestWithFSO extends OMKeyCreateRequest {
+
+  public OMKeyCreateRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest =
+            getOmRequest().getCreateKeyRequest();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs = createKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyAllocates();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    OmBucketInfo omBucketInfo = null;
+    final List<OmKeyLocationInfo> locations = new ArrayList<>();
+
+    boolean acquireLock = false;
+    OMClientResponse omClientResponse = null;
+    OzoneManagerProtocolProtos.OMResponse.Builder omResponse =
+            OmResponseUtil.getOMResponseBuilder(getOmRequest());
+    IOException exception = null;
+    Result result;
+    List<OmDirectoryInfo> missingParentInfos;
+    int numKeysCreated = 0;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acl
+      checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+              IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OmKeyInfo dbFileInfo = null;
+
+      OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
+              OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+                      volumeName, bucketName, keyName, Paths.get(keyName));
+
+      if (pathInfoFSO.getDirectoryResult()
+              == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
+        String dbFileKey = omMetadataManager.getOzonePathKey(
+                pathInfoFSO.getLastKnownParentId(),
+                pathInfoFSO.getLeafNodeName());
+        dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+                omMetadataManager, dbFileKey, keyName);
+        if (dbFileInfo != null) {
+          ozoneManager.getKeyManager().refresh(dbFileInfo);
+        }
+      }
+
+      // Check if a file or directory exists with the same key name.
+      if (pathInfoFSO.getDirectoryResult() == DIRECTORY_EXISTS) {
+        throw new OMException("Cannot write to "
+            + "directory. createIntermediateDirs behavior is enabled and "
+            + "hence / has special interpretation: " + keyName, NOT_A_FILE);
+      } else if (pathInfoFSO.getDirectoryResult()
+          == FILE_EXISTS_IN_GIVENPATH) {
+        throw new OMException("Cannot create file: " + keyName
+            + " as there is already a file in the given path", NOT_A_FILE);
+      }
+
+      // add all missing parents to dir table
+      missingParentInfos =
+              OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(
+                      ozoneManager, keyArgs, pathInfoFSO, trxnLogIndex);
+
+      // Number of keys (missing parent directories) created so far.
+      numKeysCreated = missingParentInfos.size();
+
+      // do open key
+      OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+              omMetadataManager.getBucketKey(volumeName, bucketName));
+
+      OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
+              dbFileInfo, keyArgs.getDataSize(), locations,
+              getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(),
+              bucketInfo, pathInfoFSO, trxnLogIndex,
+              pathInfoFSO.getLeafNodeObjectId(),
+              ozoneManager.isRatisEnabled());
+
+      long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
+      long clientID = createKeyRequest.getClientID();
+      String dbOpenFileName = omMetadataManager
+          .getOpenFileName(pathInfoFSO.getLastKnownParentId(),
+              pathInfoFSO.getLeafNodeName(), clientID);
+
+      // Append new blocks
+      List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList()
+              .stream().map(OmKeyLocationInfo::getFromProtobuf)
+              .collect(Collectors.toList());
+      omFileInfo.appendNewBlocks(newLocationList, false);
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+      // check bucket and volume quota
+      long preAllocatedSpace = newLocationList.size()
+              * ozoneManager.getScmBlockSize()
+              * omFileInfo.getReplicationConfig().getRequiredNodes();
+      checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
+      checkBucketQuotaInNamespace(omBucketInfo, 1L);
+
+      // Adding the cache entry for this open key can be done outside the
+      // lock. Even if the bucket gets deleted in the meantime, commitKey
+      // will detect the deletion.
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+              dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(),
+              trxnLogIndex);
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              trxnLogIndex);
+
+      omBucketInfo.incrUsedBytes(preAllocatedSpace);
+      // Update namespace quota
+      omBucketInfo.incrUsedNamespace(1L);
+
+      // Prepare response. Sets the user-given full key name in the
+      // 'keyName' attribute of the response object.
+      int clientVersion = getOmRequest().getVersion();
+      omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
+              .setKeyInfo(omFileInfo.getProtobuf(keyName, clientVersion))
+              .setID(clientID)
+              .setOpenVersion(openVersion).build())
+              .setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponseWithFSO(omResponse.build(),
+              omFileInfo, missingParentInfos, clientID,
+              omBucketInfo.copyObject());
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omMetrics.incNumKeyAllocateFails();
+      omResponse.setCmdType(Type.CreateKey);
+      omClientResponse = new OMKeyCreateResponseWithFSO(
+              createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquireLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    // Audit Log outside the lock
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+            OMAction.ALLOCATE_KEY, auditMap, exception,
+            getOmRequest().getUserInfo()));
+
+    logResult(createKeyRequest, omMetrics, exception, result,
+            numKeysCreated);
+
+    return omClientResponse;
+  }
+}
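Reviewer note: create in the prefix layout hinges on `verifyDirectoryKeysInPath`, which walks the path components and classifies the result. A simplified, hypothetical decision table (the enum below only mirrors the constants referenced above; it is not the Ozone API):

```java
/** Illustrative sketch of the path checks in OMKeyCreateRequestWithFSO. */
public final class CreatePathCheckSketch {

  enum DirectoryResult {
    NONE, FILE_EXISTS, DIRECTORY_EXISTS, FILE_EXISTS_IN_GIVENPATH
  }

  /** Returns true when create may proceed (possibly overwriting a file). */
  static boolean mayCreate(DirectoryResult r) {
    switch (r) {
    case DIRECTORY_EXISTS:
      // A directory occupies the exact path: cannot write a file over it.
      throw new IllegalArgumentException("Cannot write to a directory");
    case FILE_EXISTS_IN_GIVENPATH:
      // An ancestor component is a file, e.g. /a/b is a file, key is /a/b/c.
      throw new IllegalArgumentException("A parent component is a file");
    default:
      return true; // NONE, or FILE_EXISTS (overwrite handled by the caller)
    }
  }

  public static void main(String[] args) {
    System.out.println(mayCreate(DirectoryResult.FILE_EXISTS)); // true
  }
}
```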
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
new file mode 100644
index 0000000..8dfc8d5
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles DeleteKey request - prefix layout.
+ */
+public class OMKeyDeleteRequestWithFSO extends OMKeyDeleteRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyDeleteRequestWithFSO.class);
+
+  public OMKeyDeleteRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
+
+    OzoneManagerProtocolProtos.KeyArgs keyArgs =
+        deleteKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String keyName = keyArgs.getKeyName();
+    boolean recursive = keyArgs.getRecursive();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyDeletes();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    IOException exception = null;
+    boolean acquiredLock = false;
+    OMClientResponse omClientResponse = null;
+    Result result = null;
+    OmBucketInfo omBucketInfo = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      checkACLs(ozoneManager, volumeName, bucketName, keyName,
+          IAccessAuthorizer.ACLType.DELETE);
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+          volumeName, bucketName);
+
+      // Validate bucket and volume exists or not.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      OzoneFileStatus keyStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+                      bucketName, keyName, 0);
+
+      if (keyStatus == null) {
+        throw new OMException("Key not found. Key:" + keyName, KEY_NOT_FOUND);
+      }
+
+      OmKeyInfo omKeyInfo = keyStatus.getKeyInfo();
+      // New key format for the fileTable & dirTable.
+      // For example, if the user-given key path is '/a/b/c/d/e/file1', the
+      // DB keyName field stores only the leaf node name, which is 'file1'.
+      String fileName = OzoneFSUtils.getFileName(keyName);
+      omKeyInfo.setKeyName(fileName);
+
+      // Set the UpdateID to current transactionLogIndex
+      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      String ozonePathKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), omKeyInfo.getFileName());
+
+      if (keyStatus.isDirectory()) {
+        // Check whether any sub-paths exist under the user-requested path.
+        if (!recursive && OMFileRequest.hasChildren(omKeyInfo,
+                omMetadataManager)) {
+          throw new OMException("Directory is not empty. Key:" + keyName,
+                  DIRECTORY_NOT_EMPTY);
+        }
+
+        // Update dir cache.
+        omMetadataManager.getDirectoryTable().addCacheEntry(
+                new CacheKey<>(ozonePathKey),
+                new CacheValue<>(Optional.absent(), trxnLogIndex));
+      } else {
+        // Update table cache.
+        omMetadataManager.getKeyTable().addCacheEntry(
+                new CacheKey<>(ozonePathKey),
+                new CacheValue<>(Optional.absent(), trxnLogIndex));
+      }
+
+      omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+
+      // TODO: HDDS-4565: consider all the sub-paths if the path is a dir.
+      long quotaReleased = sumBlockLengths(omKeyInfo);
+      omBucketInfo.incrUsedBytes(-quotaReleased);
+      omBucketInfo.incrUsedNamespace(-1L);
+
+      // No need to add cache entries to the delete table, as it is used
+      // only by DeleteKeyService and never for client response validation.
+      // TODO: Revisit if we need it later.
+
+      omClientResponse = new OMKeyDeleteResponseWithFSO(omResponse
+          .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
+          keyName, omKeyInfo, ozoneManager.isRatisEnabled(),
+          omBucketInfo.copyObject(), keyStatus.isDirectory());
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyDeleteResponseWithFSO(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+            omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
+    }
+
+    // Performing audit logging outside of the lock.
+    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
+        exception, userInfo));
+
+    switch (result) {
+    case SUCCESS:
+      omMetrics.decNumKeys();
+      LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName,
+          bucketName, keyName);
+      break;
+    case FAILURE:
+      omMetrics.incNumKeyDeleteFails();
+      LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.",
+          volumeName, bucketName, keyName, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}",
+          deleteKeyRequest);
+    }
+
+    return omClientResponse;
+  }
+}
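Reviewer note: because file/dir table rows are keyed by `parentObjectID` plus the leaf name, deleting a directory marks exactly one row absent, however large its subtree; the subtree itself is reclaimed asynchronously. A minimal sketch of the key scheme (the separator and names are assumptions for illustration):

```java
/** Illustrative sketch of the prefix-layout path key used above. */
public final class PathKeySketch {

  static String ozonePathKey(long parentObjectId, String leafName) {
    return parentObjectId + "/" + leafName;
  }

  public static void main(String[] args) {
    // /vol/bucket/a/b/file1, where directory 'b' has objectID 517:
    System.out.println(ozonePathKey(517L, "file1")); // "517/file1"
    // Deleting or renaming 'b' never rewrites "517/file1": children
    // reference the parent by objectID, so only b's own row is touched.
  }
}
```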
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
new file mode 100644
index 0000000..86fe2e9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -0,0 +1,301 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RenameKeyResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles rename key request - prefix layout.
+ */
+public class OMKeyRenameRequestWithFSO extends OMKeyRenameRequest {
+
+  private static final Logger LOG =
+          LoggerFactory.getLogger(OMKeyRenameRequestWithFSO.class);
+
+  public OMKeyRenameRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+    RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
+    KeyArgs keyArgs = renameKeyRequest.getKeyArgs();
+    Map<String, String> auditMap = buildAuditMap(keyArgs, renameKeyRequest);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    String fromKeyName = keyArgs.getKeyName();
+    String toKeyName = renameKeyRequest.getToKeyName();
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumKeyRenames();
+
+    AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+            getOmRequest());
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean acquiredLock = false;
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+    OmKeyInfo fromKeyValue;
+    Result result;
+    try {
+      if (toKeyName.isEmpty() || fromKeyName.isEmpty()) {
+        throw new OMException("Key name is empty",
+                OMException.ResultCodes.INVALID_KEY_NAME);
+      }
+
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // check Acls to see if user has access to perform delete operation on
+      // old key and create operation on new key
+
+      // check Acl fromKeyName
+      checkACLs(ozoneManager, volumeName, bucketName, fromKeyName,
+          IAccessAuthorizer.ACLType.DELETE);
+
+      // check Acl toKeyName
+      checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName,
+              IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+      acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+
+      // Validate bucket and volume exists or not.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      // Check if fromKey exists
+      OzoneFileStatus fromKeyFileStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+                      bucketName, fromKeyName, 0);
+      // case-1) fromKeyName should exist, otherwise throw an exception.
+      if (fromKeyFileStatus == null) {
+        // TODO: Add support for renaming open key
+        throw new OMException("Key not found " + fromKeyName, KEY_NOT_FOUND);
+      }
+
+      // source existed
+      fromKeyValue = fromKeyFileStatus.getKeyInfo();
+      boolean isRenameDirectory = fromKeyFileStatus.isDirectory();
+
+      // case-2) Cannot rename a directory to its own subdirectory
+      OMFileRequest.verifyToDirIsASubDirOfFromDirectory(fromKeyName,
+              toKeyName, fromKeyFileStatus.isDirectory());
+
+      OzoneFileStatus toKeyFileStatus =
+              OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+                      volumeName, bucketName, toKeyName, 0);
+
+      // Check if toKey exists.
+      if (toKeyFileStatus != null) {
+        // Destination exists and following are different cases:
+        OmKeyInfo toKeyValue = toKeyFileStatus.getKeyInfo();
+
+        if (fromKeyValue.getKeyName().equals(toKeyValue.getKeyName())) {
+          // case-3) If src == destination, check that source and destination
+          // are of the same type:
+          // (a) if dst is a file then the rename succeeds (no-op);
+          // (b) otherwise throw an exception.
+          // TODO: Discuss whether we need to throw an exception for the
+          //  file case as well.
+          if (toKeyFileStatus.isFile()) {
+            result = Result.SUCCESS;
+          } else {
+            throw new OMException("Key already exists " + toKeyName,
+                    OMException.ResultCodes.KEY_ALREADY_EXISTS);
+          }
+        } else if (toKeyFileStatus.isDirectory()) {
+          // case-4) If dst is a directory then rename source as sub-path of it
+          // For example: rename /source to /dst will lead to /dst/source
+          String fromFileName = OzoneFSUtils.getFileName(fromKeyName);
+          String newToKeyName = OzoneFSUtils.appendFileNameToKeyPath(toKeyName,
+                  fromFileName);
+          OzoneFileStatus newToOzoneFileStatus =
+                  OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+                          volumeName, bucketName, newToKeyName, 0);
+
+          if (newToOzoneFileStatus != null) {
+            // case-5) If the new destination '/dst/source' exists then
+            // throw an exception.
+            throw new OMException(String.format(
+                    "Failed to rename %s to %s, file already exists or not " +
+                            "empty!", fromKeyName, newToKeyName),
+                    OMException.ResultCodes.KEY_ALREADY_EXISTS);
+          }
+
+          omClientResponse = renameKey(toKeyValue.getObjectID(), trxnLogIndex,
+                  fromKeyValue, isRenameDirectory, newToKeyName,
+                  keyArgs.getModificationTime(), omResponse, ozoneManager);
+          result = Result.SUCCESS;
+        } else {
+          // case-6) If destination is a file type and if exists then throws
+          // key already exists exception.
+          throw new OMException("Failed to rename, key already exists "
+                  + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS);
+        }
+      } else {
+        // Destination doesn't exist:
+        // case-7) Check whether the dst parent dir exists. If the parent
+        // doesn't exist then throw an exception, otherwise the source can be
+        // renamed to the destination path.
+        long toKeyParentId = OMFileRequest.getToKeyNameParentId(volumeName,
+                bucketName, toKeyName, fromKeyName, omMetadataManager);
+
+        omClientResponse = renameKey(toKeyParentId, trxnLogIndex,
+                fromKeyValue, isRenameDirectory, toKeyName,
+                keyArgs.getModificationTime(), omResponse, ozoneManager);
+
+        result = Result.SUCCESS;
+      }
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeyRenameResponseWithFSO(createErrorOMResponse(
+              omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+              omDoubleBufferHelper);
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+                bucketName);
+      }
+    }
+
+    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
+            exception, getOmRequest().getUserInfo()));
+
+    switch (result) {
+    case SUCCESS:
+      LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" +
+                      " fromKey:{} toKey:{}. ", volumeName, bucketName,
+              fromKeyName, toKeyName);
+      break;
+    case FAILURE:
+      ozoneManager.getMetrics().incNumKeyRenameFails();
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " +
+                      "toKey:{}. Key: {} not found.", volumeName, bucketName,
+              fromKeyName, toKeyName, fromKeyName);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyRenameRequest: {}",
+              renameKeyRequest);
+    }
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("parameternumber")
+  private OMClientResponse renameKey(long toKeyParentId,
+      long trxnLogIndex, OmKeyInfo fromKeyValue, boolean isRenameDirectory,
+      String toKeyName, long modificationTime, OMResponse.Builder omResponse,
+      OzoneManager ozoneManager) {
+
+    String dbFromKey = fromKeyValue.getPath();
+    String toKeyFileName = OzoneFSUtils.getFileName(toKeyName);
+
+    fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+    // Set toFileName
+    fromKeyValue.setKeyName(toKeyFileName);
+    fromKeyValue.setFileName(toKeyFileName);
+    // Set toKeyObjectId
+    fromKeyValue.setParentObjectID(toKeyParentId);
+    // Set modification time
+    fromKeyValue.setModificationTime(modificationTime);
+
+    // destination dbKeyName
+    String dbToKey = fromKeyValue.getPath();
+
+    // Update the table cache: dbFromKey is marked deleted and dbToKey is
+    // added with the newly updated omKeyInfo.
+    OMMetadataManager metadataMgr = ozoneManager.getMetadataManager();
+    if (isRenameDirectory) {
+      Table<String, OmDirectoryInfo> dirTable = metadataMgr.getDirectoryTable();
+      dirTable.addCacheEntry(new CacheKey<>(dbFromKey),
+              new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+      dirTable.addCacheEntry(new CacheKey<>(dbToKey),
+              new CacheValue<>(Optional.of(OMFileRequest.
+                              getDirectoryInfo(fromKeyValue)), trxnLogIndex));
+    } else {
+      Table<String, OmKeyInfo> keyTable = metadataMgr.getKeyTable();
+
+      keyTable.addCacheEntry(new CacheKey<>(dbFromKey),
+              new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+      keyTable.addCacheEntry(new CacheKey<>(dbToKey),
+              new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
+    }
+
+    OMClientResponse omClientResponse = new OMKeyRenameResponseWithFSO(
+        omResponse.setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
+        dbFromKey, dbToKey, fromKeyValue, isRenameDirectory);
+    return omClientResponse;
+  }
+
+  private Map<String, String> buildAuditMap(
+          KeyArgs keyArgs, RenameKeyRequest renameKeyRequest) {
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+    auditMap.remove(OzoneConsts.KEY);
+    auditMap.put(OzoneConsts.SRC_KEY, keyArgs.getKeyName());
+    auditMap.put(OzoneConsts.DST_KEY, renameKeyRequest.getToKeyName());
+    return auditMap;
+  }
+}
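Reviewer note: `renameKey` above shows why rename is atomic and O(1) in the prefix layout: only the renamed entry's own row changes (its parent pointer and leaf name), while descendants keep referencing it by objectID. A hypothetical sketch (KeyEntry is invented for illustration):

```java
/** Illustrative sketch of the O(1) rename in OMKeyRenameRequestWithFSO. */
public final class RenameSketch {

  static final class KeyEntry {
    long parentObjectId;
    String fileName;

    KeyEntry(long parentObjectId, String fileName) {
      this.parentObjectId = parentObjectId;
      this.fileName = fileName;
    }

    String dbKey() {
      return parentObjectId + "/" + fileName;
    }
  }

  public static void main(String[] args) {
    KeyEntry dir = new KeyEntry(128L, "source");
    String dbFromKey = dir.dbKey();              // "128/source"

    // Rename /source under a destination directory with objectID 640:
    dir.parentObjectId = 640L;
    String dbToKey = dir.dbKey();                // "640/source"

    // One delete (dbFromKey) plus one put (dbToKey) in the same
    // transaction gives an atomic rename, independent of subtree size.
    System.out.println(dbFromKey + " -> " + dbToKey);
  }
}
```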
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index df3e505..0647be0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -240,37 +241,6 @@
     return edek;
   }
 
-  /**
-   * Create OmKeyInfo object.
-   * @return OmKeyInfo
-   */
-  @SuppressWarnings("parameterNumber")
-  protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nonnull ReplicationConfig replicationConfig,
-      long size,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-      long transactionLogIndex, long objectID) {
-    return new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(size)
-        .setReplicationConfig(replicationConfig)
-        .setFileEncryptionInfo(encInfo)
-        .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
-        .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
-        .setObjectID(objectID)
-        .setUpdateID(transactionLogIndex)
-        .build();
-  }
-
   protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs,
       OmBucketInfo bucketInfo, PrefixManager prefixManager) {
     List<OzoneAcl> acls = new ArrayList<>();
@@ -310,95 +280,6 @@
   }
 
   /**
-   * Prepare OmKeyInfo which will be persisted to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  protected OmKeyInfo prepareKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nullable FileEncryptionInfo encInfo,
-      @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo,
-      long transactionLogIndex,
-      @Nonnull long objectID,
-      boolean isRatisEnabled)
-      throws IOException {
-    if (keyArgs.getIsMultipartKey()) {
-      return prepareMultipartKeyInfo(omMetadataManager, keyArgs,
-          size, locations, encInfo, prefixManager, omBucketInfo,
-          transactionLogIndex, objectID);
-      //TODO args.getMetadata
-    }
-    if (dbKeyInfo != null) {
-      // TODO: Need to be fixed, as when key already exists, we are
-      //  appending new blocks to existing key.
-      // The key already exist, the new blocks will be added as new version
-      // when locations.size = 0, the new version will have identical blocks
-      // as its previous version
-      dbKeyInfo.addNewVersion(locations, false);
-      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
-      // The modification time is set in preExecute. Use the same
-      // modification time.
-      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
-      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
-      return dbKeyInfo;
-    }
-
-    // the key does not exist, create a new object.
-    // Blocks will be appended as version 0.
-    return createKeyInfo(keyArgs, locations,
-        ReplicationConfig.fromTypeAndFactor(
-                keyArgs.getType(), keyArgs.getFactor()),
-        keyArgs.getDataSize(), encInfo, prefixManager,
-        omBucketInfo, transactionLogIndex, objectID);
-  }
-
-  /**
-   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
-   * to openKeyTable.
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  @SuppressWarnings("parameternumber")
-  private OmKeyInfo prepareMultipartKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs args, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
-      @Nullable OmBucketInfo omBucketInfo, @Nonnull long transactionLogIndex,
-      @Nonnull long objectId)
-      throws IOException {
-
-
-    Preconditions.checkArgument(args.getMultipartNumber() > 0,
-        "PartNumber Should be greater than zero");
-    // When key is multipart upload part key, we should take replication
-    // type and replication factor from original key which has done
-    // initiate multipart upload. If we have not found any such, we throw
-    // error no such multipart upload.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such Multipart upload is with specified " +
-          "uploadId " + uploadID,
-          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    }
-    // For this upload part we don't need to check in KeyTable. As this
-    // is not an actual key, it is a part of the key.
-    return createKeyInfo(args, locations, partKeyInfo.getReplicationConfig(),
-        size, encInfo, prefixManager, omBucketInfo, transactionLogIndex,
-        objectId);
-  }
-
-  /**
    * Check Acls for the ozone bucket.
    * @param ozoneManager
    * @param volume
@@ -416,7 +297,6 @@
     }
   }
 
-
   /**
    * Check Acls for the ozone key.
    * @param ozoneManager
@@ -677,4 +557,162 @@
         new CacheKey<>(omMetadataManager.getBucketKey(volume, bucket)))
         .getCacheValue();
   }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareKeyInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          long transactionLogIndex, long objectID, boolean isRatisEnabled)
+          throws IOException {
+
+    return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size,
+            locations, encInfo, prefixManager, omBucketInfo, null,
+            transactionLogIndex, objectID, isRatisEnabled);
+  }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  protected OmKeyInfo prepareFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          @Nullable FileEncryptionInfo encInfo,
+          @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoWithFSO omPathInfo,
+          long transactionLogIndex, long objectID,
+          boolean isRatisEnabled)
+          throws IOException {
+    if (keyArgs.getIsMultipartKey()) {
+      return prepareMultipartFileInfo(omMetadataManager, keyArgs,
+              size, locations, encInfo, prefixManager, omBucketInfo,
+              omPathInfo, transactionLogIndex, objectID);
+      //TODO args.getMetadata
+    }
+    if (dbKeyInfo != null) {
+      // TODO: Needs to be fixed; when the key already exists, we are
+      //  appending new blocks to the existing key.
+      // The key already exists; the new blocks will be added as a new
+      // version. When locations.size() == 0, the new version will have
+      // blocks identical to its previous version.
+      dbKeyInfo.addNewVersion(locations, false);
+      dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
+      // The modification time is set in preExecute. Use the same
+      // modification time.
+      dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
+      return dbKeyInfo;
+    }
+
+    // the key does not exist, create a new object.
+    // Blocks will be appended as version 0.
+    return createFileInfo(keyArgs, locations,
+            ReplicationConfig.fromTypeAndFactor(
+                    keyArgs.getType(), keyArgs.getFactor()),
+            keyArgs.getDataSize(), encInfo, prefixManager,
+            omBucketInfo, omPathInfo, transactionLogIndex, objectID);
+  }
+
+  /**
+   * Create OmKeyInfo object.
+   * @return OmKeyInfo
+   */
+  @SuppressWarnings("parameterNumber")
+  protected OmKeyInfo createFileInfo(@Nonnull KeyArgs keyArgs,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      @Nonnull ReplicationConfig replicationConfig,
+      long size,
+      @Nullable FileEncryptionInfo encInfo,
+      @Nonnull PrefixManager prefixManager,
+      @Nullable OmBucketInfo omBucketInfo,
+      OMFileRequest.OMPathInfoWithFSO omPathInfo,
+      long transactionLogIndex, long objectID) {
+
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+    builder.setVolumeName(keyArgs.getVolumeName())
+            .setBucketName(keyArgs.getBucketName())
+            .setKeyName(keyArgs.getKeyName())
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, locations)))
+            .setCreationTime(keyArgs.getModificationTime())
+            .setModificationTime(keyArgs.getModificationTime())
+            .setDataSize(size)
+            .setReplicationConfig(replicationConfig)
+            .setFileEncryptionInfo(encInfo)
+            .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
+            .addAllMetadata(KeyValueUtil.getFromProtobuf(
+                    keyArgs.getMetadataList()))
+            .setUpdateID(transactionLogIndex);
+    if (omPathInfo != null) {
+      // FileTable metadata format
+      objectID = omPathInfo.getLeafNodeObjectId();
+      builder.setParentObjectID(omPathInfo.getLastKnownParentId());
+      builder.setFileName(omPathInfo.getLeafNodeName());
+    }
+    builder.setObjectID(objectID);
+    return builder.build();
+  }
+
+  /**
+   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
+   * to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  @SuppressWarnings("parameternumber")
+  private OmKeyInfo prepareMultipartFileInfo(
+          @Nonnull OMMetadataManager omMetadataManager,
+          @Nonnull KeyArgs args, long size,
+          @Nonnull List<OmKeyLocationInfo> locations,
+          FileEncryptionInfo encInfo,  @Nonnull PrefixManager prefixManager,
+          @Nullable OmBucketInfo omBucketInfo,
+          OMFileRequest.OMPathInfoWithFSO omPathInfo,
+          long transactionLogIndex, long objectID)
+          throws IOException {
+
+    Preconditions.checkArgument(args.getMultipartNumber() > 0,
+            "PartNumber Should be greater than zero");
+    // When key is multipart upload part key, we should take replication
+    // type and replication factor from original key which has done
+    // initiate multipart upload. If we have not found any such, we throw
+    // error no such multipart upload.
+    String uploadID = args.getMultipartUploadID();
+    Preconditions.checkNotNull(uploadID);
+    String multipartKey = "";
+    if (omPathInfo != null) {
+      // FileTable metadata format
+      multipartKey = omMetadataManager.getMultipartKey(
+              omPathInfo.getLastKnownParentId(),
+              omPathInfo.getLeafNodeName(), uploadID);
+    } else {
+      multipartKey = omMetadataManager
+              .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+                      args.getKeyName(), uploadID);
+    }
+    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
+            multipartKey);
+    if (partKeyInfo == null) {
+      throw new OMException("No such Multipart upload is with specified " +
+              "uploadId " + uploadID,
+              OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+    }
+    // For this upload part we don't need to check the KeyTable, as this
+    // is not an actual key but a part of one.
+    return createFileInfo(args, locations, partKeyInfo.getReplicationConfig(),
+            size, encInfo, prefixManager, omBucketInfo, omPathInfo,
+            transactionLogIndex, objectID);
+  }
 }
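
A note for readers of the refactor above: createFileInfo() is the single point that now serves both metadata layouts. With a null OMPathInfoWithFSO it behaves like the old createKeyInfo(); with a non-null one it switches the record into FileTable form, keyed by parent object id plus leaf file name, which is what lets a directory rename or delete touch only the parent entry. A minimal standalone sketch of the two DB key shapes (separator characters here are an assumption, not Ozone's actual encoding):

// Illustrative only: contrasts the flat key-table key with the FSO
// file-table key that a non-null OMPathInfoWithFSO implies.
final class LayoutKeySketch {
  // SIMPLE layout: the full path is the key.
  static String simpleKey(String volume, String bucket, String keyName) {
    return "/" + volume + "/" + bucket + "/" + keyName;
  }
  // PREFIX/FSO layout: parent object id + leaf file name is the key.
  static String fsoKey(long parentObjectId, String fileName) {
    return parentObjectId + "/" + fileName;
  }
  public static void main(String[] args) {
    System.out.println(simpleKey("vol1", "buck1", "a/b/c.txt")); // /vol1/buck1/a/b/c.txt
    System.out.println(fsoKey(1025L, "c.txt"));                  // 1025/c.txt
  }
}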
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMPathsPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMPathsPurgeRequestWithFSO.java
new file mode 100644
index 0000000..4515cd5
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMPathsPurgeRequestWithFSO.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMPathsPurgeResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import java.util.List;
+
+/**
+ * Handles purging of paths (directories and files) from the OM DB.
+ */
+public class OMPathsPurgeRequestWithFSO extends OMKeyRequest {
+
+  public OMPathsPurgeRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    OzoneManagerProtocolProtos.PurgePathsRequest purgePathsRequest =
+        getOmRequest().getPurgePathsRequest();
+
+    List<String> deletedDirsList = purgePathsRequest.getDeletedDirsList();
+    List<OzoneManagerProtocolProtos.KeyInfo> deletedSubFilesList =
+        purgePathsRequest.getDeletedSubFilesList();
+    List<OzoneManagerProtocolProtos.KeyInfo> markDeletedSubDirsList =
+        purgePathsRequest.getMarkDeletedSubDirsList();
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+
+    OMClientResponse omClientResponse = new OMPathsPurgeResponseWithFSO(
+        omResponse.build(), markDeletedSubDirsList, deletedSubFilesList,
+        deletedDirsList, ozoneManager.isRatisEnabled());
+    addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+        omDoubleBufferHelper);
+
+    return omClientResponse;
+  }
+}
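
The request above is intentionally thin: it only unpacks the three path groups from PurgePathsRequest and hands them to the response for double-buffer application. A simplified, hypothetical holder showing the shape of that payload (the class below is illustrative, not the patch's code; KeyInfo stands in for OzoneManagerProtocolProtos.KeyInfo):

import java.util.List;

// Illustrative holder mirroring the three lists read in
// validateAndUpdateCache(); directories are only marked deleted so their
// sub-trees can be reclaimed in later passes, while sub-files go straight
// to block deletion.
final class PurgeBatch<KeyInfo> {
  final List<String> deletedDirs;          // dirs whose sub-tree walk finished
  final List<KeyInfo> deletedSubFiles;     // files whose blocks can be freed
  final List<KeyInfo> markDeletedSubDirs;  // sub-dirs queued for later passes

  PurgeBatch(List<String> dirs, List<KeyInfo> files, List<KeyInfo> subDirs) {
    this.deletedDirs = dirs;
    this.deletedSubFiles = files;
    this.markDeletedSubDirs = subDirs;
  }
}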
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
new file mode 100644
index 0000000..c264300
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.ObjectParser;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+
+/**
+ * Handles key ACL requests - prefix layout.
+ */
+public abstract class OMKeyAclRequestWithFSO extends OMKeyAclRequest {
+
+  public OMKeyAclRequestWithFSO(OzoneManagerProtocolProtos.OMRequest omReq) {
+    super(omReq);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    OmKeyInfo omKeyInfo = null;
+
+    OzoneManagerProtocolProtos.OMResponse.Builder omResponse = onInit();
+    OMClientResponse omClientResponse = null;
+    IOException exception = null;
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    boolean lockAcquired = false;
+    String volume = null;
+    String bucket = null;
+    String key = null;
+    boolean operationResult = false;
+    Result result = null;
+    try {
+      ObjectParser objectParser = new ObjectParser(getPath(),
+          OzoneManagerProtocolProtos.OzoneObj.ObjectType.KEY);
+
+      volume = objectParser.getVolume();
+      bucket = objectParser.getBucket();
+      key = objectParser.getKey();
+
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME,
+            OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL,
+            volume, bucket, key);
+      }
+      lockAcquired = omMetadataManager.getLock()
+          .acquireWriteLock(BUCKET_LOCK, volume, bucket);
+      OzoneFileStatus keyStatus = OMFileRequest
+          .getOMKeyInfoIfExists(omMetadataManager, volume, bucket, key, 0);
+      if (keyStatus == null) {
+        throw new OMException("Key not found. Key:" + key, KEY_NOT_FOUND);
+      }
+      omKeyInfo = keyStatus.getKeyInfo();
+      String dbKey = omKeyInfo.getPath();
+      boolean isDirectory = keyStatus.isDirectory();
+      operationResult = apply(omKeyInfo, trxnLogIndex);
+      omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+      // Update the modification time when updating ACLs of Key.
+      long modificationTime = omKeyInfo.getModificationTime();
+      if (getOmRequest().getAddAclRequest().hasObj() && operationResult) {
+        modificationTime =
+            getOmRequest().getAddAclRequest().getModificationTime();
+      } else if (getOmRequest().getSetAclRequest().hasObj()) {
+        modificationTime =
+            getOmRequest().getSetAclRequest().getModificationTime();
+      } else if (getOmRequest().getRemoveAclRequest().hasObj()
+          && operationResult) {
+        modificationTime =
+            getOmRequest().getRemoveAclRequest().getModificationTime();
+      }
+      omKeyInfo.setModificationTime(modificationTime);
+
+      // update cache.
+      if (isDirectory) {
+        Table<String, OmDirectoryInfo> dirTable =
+            omMetadataManager.getDirectoryTable();
+        dirTable.addCacheEntry(new CacheKey<>(dbKey),
+            new CacheValue<>(Optional.of(OMFileRequest.
+                getDirectoryInfo(omKeyInfo)), trxnLogIndex));
+      } else {
+        omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(dbKey),
+            new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omClientResponse =
+          onSuccess(omResponse, omKeyInfo, operationResult, isDirectory);
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = onFailure(omResponse, ex);
+    } finally {
+      addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+          omDoubleBufferHelper);
+      if (lockAcquired) {
+        omMetadataManager.getLock()
+            .releaseWriteLock(BUCKET_LOCK, volume, bucket);
+      }
+    }
+
+    OzoneObj obj = getObject();
+    Map<String, String> auditMap = obj.toAuditMap();
+    onComplete(result, operationResult, exception, trxnLogIndex,
+        ozoneManager.getAuditLogger(), auditMap);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Get the OM client response for the failure case.
+   *
+   * @param omResp
+   * @param exception
+   * @return OMClientResponse
+   */
+  @Override
+  OMClientResponse onFailure(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResp,
+      IOException exception) {
+    return new OMKeyAclResponseWithFSO(
+        createErrorOMResponse(omResp, exception));
+  }
+
+  abstract OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDirectory);
+
+}
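
OMKeyAclRequestWithFSO is a template method: validateAndUpdateCache() owns locking, the file-vs-directory cache update, and auditing, while the add/remove/set subclasses in the next diffs only supply the mutation and the response wrapper. A simplified, hypothetical rendering of the hook contract it drives (the real abstract methods live on OMKeyAclRequest and this class):

import java.io.IOException;

// Illustrative interface only; names mirror the patch's hooks.
interface KeyAclHooks<KeyInfo, Response> {
  String getPath();                           // volume/bucket/key to resolve
  boolean apply(KeyInfo keyInfo, long txId);  // mutate ACLs; false if a no-op
  Response onSuccess(KeyInfo keyInfo, boolean changed, boolean isDirectory);
  Response onFailure(IOException exception);
}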
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java
new file mode 100644
index 0000000..e62387c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Handles add Acl request for key - prefix layout.
+ */
+public class OMKeyAddAclRequestWithFSO extends OMKeyAclRequestWithFSO {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyAddAclRequestWithFSO.class);
+
+  @Override
+  public OzoneManagerProtocolProtos.OMRequest preExecute(
+      OzoneManager ozoneManager) throws IOException {
+    long modificationTime = Time.now();
+    OzoneManagerProtocolProtos.AddAclRequest.Builder addAclRequestBuilder =
+        getOmRequest().getAddAclRequest().toBuilder()
+            .setModificationTime(modificationTime);
+
+    return getOmRequest().toBuilder().setAddAclRequest(addAclRequestBuilder)
+        .setUserInfo(getUserInfo()).build();
+  }
+
+  private String path;
+  private List<OzoneAcl> ozoneAcls;
+  private OzoneObj obj;
+
+  public OMKeyAddAclRequestWithFSO(
+      OzoneManagerProtocolProtos.OMRequest omReq) {
+    super(omReq);
+    OzoneManagerProtocolProtos.AddAclRequest addAclRequest =
+        getOmRequest().getAddAclRequest();
+    obj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj());
+    path = obj.getPath();
+    ozoneAcls =
+        Lists.newArrayList(OzoneAcl.fromProtobuf(addAclRequest.getAcl()));
+  }
+
+  @Override String getPath() {
+    return path;
+  }
+
+  @Override OzoneObj getObject() {
+    return obj;
+  }
+
+  @Override OzoneManagerProtocolProtos.OMResponse.Builder onInit() {
+    return OmResponseUtil.getOMResponseBuilder(getOmRequest());
+  }
+
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setAddAclResponse(
+        OzoneManagerProtocolProtos.AddAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponse(omResponse.build(), omKeyInfo);
+  }
+
+  @Override void onComplete(Result result, boolean operationResult,
+      IOException exception, long trxnLogIndex, AuditLogger auditLogger,
+      Map<String, String> auditMap) {
+    switch (result) {
+    case SUCCESS:
+      if (LOG.isDebugEnabled()) {
+        if (operationResult) {
+          LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, path);
+        } else {
+          LOG.debug("Acl {} already exists in path {}", ozoneAcls, path);
+        }
+      }
+      break;
+    case FAILURE:
+      LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyAddAclRequest: {}",
+          getOmRequest());
+    }
+
+    if (ozoneAcls != null) {
+      auditMap.put(OzoneConsts.ACL, ozoneAcls.toString());
+    }
+    auditLog(auditLogger,
+        buildAuditMessage(OMAction.ADD_ACL, auditMap, exception,
+            getOmRequest().getUserInfo()));
+  }
+
+  @Override boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
+    // No need for a null check here; this will never be called with null.
+    return omKeyInfo.addAcl(ozoneAcls.get(0));
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    ozoneManager.getMetrics().incNumAddAcl();
+    return super.validateAndUpdateCache(ozoneManager, trxnLogIndex,
+        omDoubleBufferHelper);
+  }
+
+  @Override OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDir) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setAddAclResponse(
+        OzoneManagerProtocolProtos.AddAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponseWithFSO(omResponse.build(), omKeyInfo, isDir);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java
new file mode 100644
index 0000000..dfd2c7a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Handles remove Acl request for key - prefix layout.
+ */
+public class OMKeyRemoveAclRequestWithFSO extends OMKeyAclRequestWithFSO {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeyRemoveAclRequestWithFSO.class);
+
+  @Override
+  public OzoneManagerProtocolProtos.OMRequest preExecute(
+      OzoneManager ozoneManager) throws IOException {
+    long modificationTime = Time.now();
+    OzoneManagerProtocolProtos.RemoveAclRequest.Builder
+        removeAclRequestBuilder =
+        getOmRequest().getRemoveAclRequest().toBuilder()
+            .setModificationTime(modificationTime);
+
+    return getOmRequest().toBuilder()
+        .setRemoveAclRequest(removeAclRequestBuilder).setUserInfo(getUserInfo())
+        .build();
+  }
+
+  private String path;
+  private List<OzoneAcl> ozoneAcls;
+  private OzoneObj obj;
+
+  public OMKeyRemoveAclRequestWithFSO(
+      OzoneManagerProtocolProtos.OMRequest omRequest) {
+    super(omRequest);
+    OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest =
+        getOmRequest().getRemoveAclRequest();
+    obj = OzoneObjInfo.fromProtobuf(removeAclRequest.getObj());
+    path = obj.getPath();
+    ozoneAcls =
+        Lists.newArrayList(OzoneAcl.fromProtobuf(removeAclRequest.getAcl()));
+  }
+
+  @Override
+  String getPath() {
+    return path;
+  }
+
+  @Override
+  OzoneObj getObject() {
+    return obj;
+  }
+
+  @Override
+  OzoneManagerProtocolProtos.OMResponse.Builder onInit() {
+    return OmResponseUtil.getOMResponseBuilder(getOmRequest());
+  }
+
+  @Override
+  OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setRemoveAclResponse(
+        OzoneManagerProtocolProtos.RemoveAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponse(omResponse.build(), omKeyInfo);
+  }
+
+  @Override
+  void onComplete(Result result, boolean operationResult,
+      IOException exception, long trxnLogIndex, AuditLogger auditLogger,
+      Map<String, String> auditMap) {
+    switch (result) {
+    case SUCCESS:
+      if (LOG.isDebugEnabled()) {
+        if (operationResult) {
+          LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, path);
+        } else {
+          LOG.debug("Acl {} not removed from path {} as it does not exist",
+              ozoneAcls, path);
+        }
+      }
+      break;
+    case FAILURE:
+      LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeyRemoveAclRequest: {}",
+          getOmRequest());
+    }
+
+    if (ozoneAcls != null) {
+      auditMap.put(OzoneConsts.ACL, ozoneAcls.toString());
+    }
+    auditLog(auditLogger,
+        buildAuditMessage(OMAction.REMOVE_ACL, auditMap, exception,
+            getOmRequest().getUserInfo()));
+  }
+
+  @Override
+  boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
+    // No need for a null check here; this will never be called with null.
+    return omKeyInfo.removeAcl(ozoneAcls.get(0));
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    ozoneManager.getMetrics().incNumRemoveAcl();
+    return super.validateAndUpdateCache(ozoneManager, trxnLogIndex,
+        omDoubleBufferHelper);
+  }
+
+  @Override
+  OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDir) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setRemoveAclResponse(
+        OzoneManagerProtocolProtos.RemoveAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponseWithFSO(omResponse.build(), omKeyInfo, isDir);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java
new file mode 100644
index 0000000..20598e8
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key.acl;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Handles set Acl request for key - prefix layout.
+ */
+public class OMKeySetAclRequestWithFSO extends OMKeyAclRequestWithFSO {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMKeySetAclRequestWithFSO.class);
+
+  @Override
+  public OzoneManagerProtocolProtos.OMRequest preExecute(
+      OzoneManager ozoneManager) throws IOException {
+    long modificationTime = Time.now();
+    OzoneManagerProtocolProtos.SetAclRequest.Builder setAclRequestBuilder =
+        getOmRequest().getSetAclRequest().toBuilder()
+            .setModificationTime(modificationTime);
+
+    return getOmRequest().toBuilder().setSetAclRequest(setAclRequestBuilder)
+        .setUserInfo(getUserInfo()).build();
+  }
+
+  private String path;
+  private List<OzoneAcl> ozoneAcls;
+  private OzoneObj obj;
+
+  public OMKeySetAclRequestWithFSO(
+      OzoneManagerProtocolProtos.OMRequest omReq) {
+    super(omReq);
+    OzoneManagerProtocolProtos.SetAclRequest setAclRequest =
+        getOmRequest().getSetAclRequest();
+    obj = OzoneObjInfo.fromProtobuf(setAclRequest.getObj());
+    path = obj.getPath();
+    ozoneAcls = Lists
+        .newArrayList(OzoneAclUtil.fromProtobuf(setAclRequest.getAclList()));
+  }
+
+  @Override
+  String getPath() {
+    return path;
+  }
+
+  @Override
+  OzoneObj getObject() {
+    return obj;
+  }
+
+  @Override
+  OzoneManagerProtocolProtos.OMResponse.Builder onInit() {
+    return OmResponseUtil.getOMResponseBuilder(getOmRequest());
+  }
+
+  @Override
+  OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setSetAclResponse(
+        OzoneManagerProtocolProtos.SetAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponse(omResponse.build(), omKeyInfo);
+  }
+
+  @Override
+  void onComplete(Result result, boolean operationResult,
+      IOException exception, long trxnLogIndex, AuditLogger auditLogger,
+      Map<String, String> auditMap) {
+    switch (result) {
+    case SUCCESS:
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path);
+      }
+      break;
+    case FAILURE:
+      LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception);
+      break;
+    default:
+      LOG.error("Unrecognized Result for OMKeySetAclRequest: {}",
+          getOmRequest());
+    }
+
+    if (ozoneAcls != null) {
+      auditMap.put(OzoneConsts.ACL, ozoneAcls.toString());
+    }
+    auditLog(auditLogger,
+        buildAuditMessage(OMAction.SET_ACL, auditMap, exception,
+            getOmRequest().getUserInfo()));
+  }
+
+  @Override
+  boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) {
+    // No need for a null check here; this will never be called with null.
+    return omKeyInfo.setAcls(ozoneAcls);
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+    ozoneManager.getMetrics().incNumSetAcl();
+    return super.validateAndUpdateCache(ozoneManager, trxnLogIndex,
+        omDoubleBufferHelper);
+  }
+
+  @Override
+  OMClientResponse onSuccess(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      OmKeyInfo omKeyInfo, boolean operationResult, boolean isDir) {
+    omResponse.setSuccess(operationResult);
+    omResponse.setSetAclResponse(
+        OzoneManagerProtocolProtos.SetAclResponse.newBuilder()
+            .setResponse(operationResult));
+    return new OMKeyAclResponseWithFSO(omResponse.build(), omKeyInfo, isDir);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 224c51d..4b10cf6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -229,7 +229,17 @@
             volumeName, bucketName);
       }
     }
+    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+            bucketName, keyName, exception, result);
 
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("parameternumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartInfoInitiateRequest multipartInfoInitiateRequest,
+      Map<String, String> auditMap, String volumeName, String bucketName,
+      String keyName, IOException exception, Result result) {
     // audit log
     auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
         OMAction.INITIATE_MULTIPART_UPLOAD, auditMap,
@@ -251,7 +261,5 @@
       LOG.error("Unrecognized Result for S3InitiateMultipartUploadRequest: {}",
           multipartInfoInitiateRequest);
     }
-
-    return omClientResponse;
   }
 }
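
The change above is a small template-method refactor: the audit-and-log tail of validateAndUpdateCache() moves behind a protected logResult() so the WithFSO variant in the next diff can reuse it after its own cache updates. Schematically (class and method names below are illustrative, not Ozone's):

// Illustrative shape of the refactor only.
abstract class InitiateMpuBase {
  final Object handle() {
    Object response = updateCache();  // layout-specific work per subclass
    logResult(response);              // shared audit/log tail, now reusable
    return response;
  }
  protected abstract Object updateCache();
  protected void logResult(Object response) { /* shared audit logging */ }
}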
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
new file mode 100644
index 0000000..da13536
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+
+/**
+ * Handles initiate multipart upload request - prefix layout.
+ */
+public class S3InitiateMultipartUploadRequestWithFSO
+        extends S3InitiateMultipartUploadRequest {
+
+  public S3InitiateMultipartUploadRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  @SuppressWarnings("methodlength")
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long transactionLogIndex,
+      OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
+    MultipartInfoInitiateRequest multipartInfoInitiateRequest =
+        getOmRequest().getInitiateMultiPartUploadRequest();
+
+    KeyArgs keyArgs =
+        multipartInfoInitiateRequest.getKeyArgs();
+
+    Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    String volumeName = keyArgs.getVolumeName();
+    String bucketName = keyArgs.getBucketName();
+    final String requestedVolume = volumeName;
+    final String requestedBucket = bucketName;
+    String keyName = keyArgs.getKeyName();
+
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    ozoneManager.getMetrics().incNumInitiateMultipartUploads();
+    boolean acquiredBucketLock = false;
+    IOException exception = null;
+    OmMultipartKeyInfo multipartKeyInfo = null;
+    OmKeyInfo omKeyInfo = null;
+    List<OmDirectoryInfo> missingParentInfos;
+    Result result = null;
+
+    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+        getOmRequest());
+    OMClientResponse omClientResponse = null;
+    try {
+      keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+      volumeName = keyArgs.getVolumeName();
+      bucketName = keyArgs.getBucketName();
+
+      // TODO to support S3 ACL later.
+      acquiredBucketLock =
+          omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+              volumeName, bucketName);
+
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+      // If KMS is configured and TDE is enabled on bucket, throw MPU not
+      // supported.
+      if (ozoneManager.getKmsProvider() != null) {
+        if (omMetadataManager.getBucketTable().get(
+            omMetadataManager.getBucketKey(volumeName, bucketName))
+            .getEncryptionKeyInfo() != null) {
+          throw new OMException("MultipartUpload is not yet supported on " +
+              "encrypted buckets", NOT_SUPPORTED_OPERATION);
+        }
+      }
+
+      OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest
+          .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName,
+              keyName, Paths.get(keyName));
+
+      // check if the directory already exists in OM
+      checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult());
+
+      // add all missing parents to dir table
+      missingParentInfos = OMDirectoryCreateRequestWithFSO
+          .getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO,
+              transactionLogIndex);
+
+      // We are adding the uploadId to the key because multiple users may
+      // run multipart uploads on the same key concurrently; whichever
+      // upload finally commits the key is the one visible in Ozone. If we
+      // did not add the id and used the same key /volume/bucket/key, parts
+      // uploaded by different users would be written to the same key, and
+      // the resulting key output could be a mix of parts from multiple
+      // users.
+
+      // So if multipart upload is initiated multiple times on the same
+      // key, we store multiple entries in the openKey table.
+      // This matches AWS S3 behavior: each time a multipart upload is run,
+      // a new uploadId is returned. An uploadId is returned even if the
+      // key already exists when the initiate multipart upload request is
+      // received.
+
+      String multipartKey = omMetadataManager.getMultipartKey(
+          volumeName, bucketName, keyName,
+          keyArgs.getMultipartUploadID());
+
+      String multipartOpenKey = omMetadataManager
+          .getMultipartKey(pathInfoFSO.getLastKnownParentId(),
+              pathInfoFSO.getLeafNodeName(), keyArgs.getMultipartUploadID());
+
+      // Even if this key already exists in the KeyTable, it is taken care
+      // of in the final complete multipart upload. AWS S3 behaves the same
+      // way: even when the key exists in a bucket, the user can still
+      // initiate an MPU.
+      final ReplicationConfig replicationConfig =
+              ReplicationConfig.fromTypeAndFactor(
+                      keyArgs.getType(), keyArgs.getFactor());
+
+      multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+          .setUploadID(keyArgs.getMultipartUploadID())
+          .setCreationTime(keyArgs.getModificationTime())
+          .setReplicationConfig(replicationConfig)
+          .setObjectID(pathInfoFSO.getLeafNodeObjectId())
+          .setUpdateID(transactionLogIndex)
+          .setParentID(pathInfoFSO.getLastKnownParentId())
+          .build();
+
+      omKeyInfo = new OmKeyInfo.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(keyArgs.getKeyName())
+          .setCreationTime(keyArgs.getModificationTime())
+          .setModificationTime(keyArgs.getModificationTime())
+          .setReplicationConfig(replicationConfig)
+          .setOmKeyLocationInfos(Collections.singletonList(
+              new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+          .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
+          .setObjectID(pathInfoFSO.getLeafNodeObjectId())
+          .setUpdateID(transactionLogIndex)
+          .setParentObjectID(pathInfoFSO.getLastKnownParentId())
+          .build();
+
+      // Add cache entries for the prefix directories.
+      // Skip adding for the file key itself, until Key Commit.
+      OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+              Optional.absent(), Optional.of(missingParentInfos),
+              transactionLogIndex);
+
+      OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+          multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
+              transactionLogIndex);
+
+      // Add to cache
+      omMetadataManager.getMultipartInfoTable().addCacheEntry(
+          new CacheKey<>(multipartKey),
+          new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
+
+      omClientResponse =
+          new S3InitiateMultipartUploadResponseWithFSO(
+              omResponse.setInitiateMultiPartUploadResponse(
+                  MultipartInfoInitiateResponse.newBuilder()
+                      .setVolumeName(requestedVolume)
+                      .setBucketName(requestedBucket)
+                      .setKeyName(keyName)
+                      .setMultipartUploadID(keyArgs.getMultipartUploadID()))
+                  .build(), multipartKeyInfo, omKeyInfo, multipartKey,
+              missingParentInfos);
+
+      result = Result.SUCCESS;
+    } catch (IOException ex) {
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
+          createErrorOMResponse(omResponse, exception));
+    } finally {
+      addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
+          ozoneManagerDoubleBufferHelper);
+      if (acquiredBucketLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK,
+            volumeName, bucketName);
+      }
+    }
+    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
+            bucketName, keyName, exception, result);
+
+    return omClientResponse;
+  }
+
+  /**
+   * Verify the OM directory result.
+   *
+   * @param keyName           key name
+   * @param omDirectoryResult directory result
+   * @throws OMException if a directory already exists at the given path
+   */
+  private void checkDirectoryResult(String keyName,
+      OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+    if (omDirectoryResult == DIRECTORY_EXISTS) {
+      throw new OMException("Can not write to directory: " + keyName,
+              OMException.ResultCodes.NOT_A_FILE);
+    }
+  }
+}
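
Note that the request above builds two distinct keys on purpose: multipartKey (full path plus uploadId) still indexes the multipart-info table, while multipartOpenKey (parent object id plus file name plus uploadId) indexes the open-file table so it lines up with the FSO file table. A sketch, with separator characters assumed:

// Illustrative only; real key construction belongs to OMMetadataManager.
final class MpuKeySketch {
  static String multipartKey(String vol, String bkt, String key, String id) {
    return "/" + vol + "/" + bkt + "/" + key + "/" + id;
  }
  static String multipartOpenKey(long parentId, String file, String id) {
    return parentId + "/" + file + "/" + id;
  }
}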
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 833a2a5..5d248f7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -126,8 +126,20 @@
       multipartKey = omMetadataManager.getMultipartKey(
           volumeName, bucketName, keyName, keyArgs.getMultipartUploadID());
 
+      String multipartOpenKey;
+      try {
+        multipartOpenKey =
+            getMultipartOpenKey(keyArgs.getMultipartUploadID(), volumeName,
+                bucketName, keyName, omMetadataManager);
+      } catch (OMException ome) {
+        throw new OMException(
+            "Abort Multipart Upload Failed: volume: " + requestedVolume
+                + ", bucket: " + requestedBucket + ", key: " + keyName, ome,
+            OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+      }
+
       OmKeyInfo omKeyInfo =
-          omMetadataManager.getOpenKeyTable().get(multipartKey);
+          omMetadataManager.getOpenKeyTable().get(multipartOpenKey);
       omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
 
       // If there is no entry in openKeyTable, then there is no multipart
@@ -160,25 +172,20 @@
       // No need to add the cache entries to delete table, as the entries
       // in delete table are not used by any read/write operations.
       omMetadataManager.getOpenKeyTable().addCacheEntry(
-          new CacheKey<>(multipartKey),
+          new CacheKey<>(multipartOpenKey),
           new CacheValue<>(Optional.absent(), trxnLogIndex));
       omMetadataManager.getMultipartInfoTable().addCacheEntry(
           new CacheKey<>(multipartKey),
           new CacheValue<>(Optional.absent(), trxnLogIndex));
 
-      omClientResponse = new S3MultipartUploadAbortResponse(
-          omResponse.setAbortMultiPartUploadResponse(
-              MultipartUploadAbortResponse.newBuilder()).build(),
-          multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject());
+      omClientResponse = getOmClientResponse(ozoneManager, multipartKeyInfo,
+          multipartKey, multipartOpenKey, omResponse, omBucketInfo);
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse =
-          new S3MultipartUploadAbortResponse(createErrorOMResponse(omResponse,
-              exception));
+      omClientResponse = getOmClientResponse(exception, omResponse);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -212,4 +219,33 @@
 
     return omClientResponse;
   }
+
+  protected OMClientResponse getOmClientResponse(IOException exception,
+      OMResponse.Builder omResponse) {
+
+    return new S3MultipartUploadAbortResponse(createErrorOMResponse(omResponse,
+            exception));
+  }
+
+  protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
+      OmMultipartKeyInfo multipartKeyInfo, String multipartKey,
+      String multipartOpenKey, OMResponse.Builder omResponse,
+      OmBucketInfo omBucketInfo) {
+
+    OMClientResponse omClientResponse = new S3MultipartUploadAbortResponse(
+        omResponse.setAbortMultiPartUploadResponse(
+            MultipartUploadAbortResponse.newBuilder()).build(), multipartKey,
+        multipartOpenKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+        omBucketInfo.copyObject());
+    return omClientResponse;
+  }
+
+  protected String getMultipartOpenKey(String multipartUploadID,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+
+    String multipartKey = omMetadataManager.getMultipartKey(
+        volumeName, bucketName, keyName, multipartUploadID);
+    return multipartKey;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java
new file mode 100644
index 0000000..fe4d49a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequestWithFSO.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadAbortResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+
+/**
+ * Handles abort of a multipart upload request - prefix layout.
+ */
+public class S3MultipartUploadAbortRequestWithFSO
+    extends S3MultipartUploadAbortRequest {
+
+  public S3MultipartUploadAbortRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  protected OMClientResponse getOmClientResponse(IOException exception,
+      OMResponse.Builder omResponse) {
+
+    return new S3MultipartUploadAbortResponseWithFSO(createErrorOMResponse(
+        omResponse, exception));
+  }
+
+  @Override
+  protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
+      OmMultipartKeyInfo multipartKeyInfo, String multipartKey,
+      String multipartOpenKey, OMResponse.Builder omResponse,
+      OmBucketInfo omBucketInfo) {
+
+    OMClientResponse omClientResp = new S3MultipartUploadAbortResponseWithFSO(
+        omResponse.setAbortMultiPartUploadResponse(
+            MultipartUploadAbortResponse.newBuilder()).build(), multipartKey,
+        multipartOpenKey, multipartKeyInfo, ozoneManager.isRatisEnabled(),
+        omBucketInfo.copyObject());
+    return omClientResp;
+  }
+
+  @Override
+  protected String getMultipartOpenKey(String multipartUploadID,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
+        keyName, omMetadataManager);
+
+    String multipartKey = omMetadataManager.getMultipartKey(parentID,
+        fileName, multipartUploadID);
+
+    return multipartKey;
+  }
+}
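
[Editor's note] The getMultipartOpenKey override above is where the two metadata layouts diverge: SIMPLE addresses the multipart entry by its full path, while the FSO (prefix) layout addresses it by the parent directory's object ID plus the file name. A runnable toy comparison, assuming simplified string encodings rather than the actual RocksDB key codec:

    public final class MultipartKeyShapes {
      // SIMPLE layout: the whole path is embedded in the key.
      static String simpleKey(String vol, String bucket, String key,
          String uploadId) {
        return "/" + vol + "/" + bucket + "/" + key + "/" + uploadId;
      }

      // FSO/prefix layout: parent object id + file name + upload id.
      static String fsoKey(long parentId, String fileName, String uploadId) {
        return parentId + "/" + fileName + "/" + uploadId;
      }

      public static void main(String[] args) {
        System.out.println(simpleKey("vol1", "buck1", "a/b/file1", "uid-1"));
        System.out.println(fsoKey(1027L, "file1", "uid-1"));
      }
    }

Because the FSO key embeds only the parent's object ID, renaming an ancestor directory does not require rewriting pending multipart entries.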
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 4cd4fd4..b6485b2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -134,21 +134,21 @@
       validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
 
       String uploadID = keyArgs.getMultipartUploadID();
-      multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
-          keyName, uploadID);
+      multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+              omMetadataManager, uploadID);
 
       multipartKeyInfo = omMetadataManager.getMultipartInfoTable()
           .get(multipartKey);
 
       long clientID = multipartCommitUploadPartRequest.getClientID();
 
-      openKey = omMetadataManager.getOpenKey(
-          volumeName, bucketName, keyName, clientID);
+      openKey = getOpenKey(volumeName, bucketName, keyName, omMetadataManager,
+              clientID);
 
       String ozoneKey = omMetadataManager.getOzoneKey(
           volumeName, bucketName, keyName);
 
-      omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+      omKeyInfo = getOmKeyInfo(omMetadataManager, openKey, keyName);
 
       if (omKeyInfo == null) {
         throw new OMException("Failed to commit Multipart Upload key, as " +
@@ -230,20 +230,19 @@
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder()
               .setPartName(partName));
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
-          omResponse.build(), multipartKey, openKey,
-          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
-          ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject());
+      omClientResponse =
+          getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
+              omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(),
+              omBucketInfo.copyObject());
 
       result = Result.SUCCESS;
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3MultipartUploadCommitPartResponse(
-          createErrorOMResponse(omResponse, exception), multipartKey, openKey,
-          multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
-          ozoneManager.isRatisEnabled(), copyBucketInfo);
+      omClientResponse =
+          getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
+              omKeyInfo, multipartKey, multipartKeyInfo,
+              createErrorOMResponse(omResponse, exception), copyBucketInfo);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -253,6 +252,46 @@
       }
     }
 
+    logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs,
+            auditMap, volumeName, bucketName, keyName, exception, partName,
+            result);
+
+    return omClientResponse;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected S3MultipartUploadCommitPartResponse getOmClientResponse(
+      OzoneManager ozoneManager,
+      OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, String openKey,
+      OmKeyInfo omKeyInfo, String multipartKey,
+      OmMultipartKeyInfo multipartKeyInfo, OMResponse build,
+      OmBucketInfo omBucketInfo) {
+
+    return new S3MultipartUploadCommitPartResponse(build, multipartKey, openKey,
+        multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+        ozoneManager.isRatisEnabled(), omBucketInfo);
+  }
+
+  protected OmKeyInfo getOmKeyInfo(OMMetadataManager omMetadataManager,
+      String openKey, String keyName) throws IOException {
+
+    return omMetadataManager.getOpenKeyTable().get(openKey);
+  }
+
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, long clientID)
+      throws IOException {
+
+    return omMetadataManager
+        .getOpenKey(volumeName, bucketName, keyName, clientID);
+  }
+
+  @SuppressWarnings("parameternumber")
+  private void logResult(OzoneManager ozoneManager,
+      MultipartCommitUploadPartRequest multipartCommitUploadPartRequest,
+      KeyArgs keyArgs, Map<String, String> auditMap, String volumeName,
+      String bucketName, String keyName, IOException exception,
+      String partName, Result result) {
     // audit log
     // Add MPU related information.
     auditMap.put(OzoneConsts.MULTIPART_UPLOAD_PART_NUMBER,
@@ -278,9 +317,12 @@
       LOG.error("Unrecognized Result for S3MultipartUploadCommitPartRequest: " +
           "{}", multipartCommitUploadPartRequest);
     }
-
-    return omClientResponse;
   }
 
+  private String getMultipartKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, String uploadID) {
+    return omMetadataManager.getMultipartKey(volumeName, bucketName,
+        keyName, uploadID);
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java
new file mode 100644
index 0000000..dacdb53
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequestWithFSO.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCommitPartResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+
+/**
+ * Handles a multipart upload commit-part request - prefix layout.
+ */
+public class S3MultipartUploadCommitPartRequestWithFSO
+        extends S3MultipartUploadCommitPartRequest {
+
+  public S3MultipartUploadCommitPartRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, OMMetadataManager omMetadataManager, long clientID)
+      throws IOException {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    long parentID = OMFileRequest
+        .getParentID(bucketId, pathComponents, keyName, omMetadataManager);
+
+    return omMetadataManager.getOpenFileName(parentID, fileName, clientID);
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfo(OMMetadataManager omMetadataManager,
+      String openKey, String keyName) throws IOException {
+
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+        omMetadataManager, openKey, keyName);
+  }
+
+  @Override
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected S3MultipartUploadCommitPartResponse getOmClientResponse(
+      OzoneManager ozoneManager,
+      OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, String openKey,
+      OmKeyInfo omKeyInfo, String multipartKey,
+      OmMultipartKeyInfo multipartKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse build, OmBucketInfo omBucketInfo) {
+
+    return new S3MultipartUploadCommitPartResponseWithFSO(build, multipartKey,
+        openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo,
+        ozoneManager.isRatisEnabled(), omBucketInfo);
+  }
+
+}
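
[Editor's note] Both overrides above resolve the parent via OMFileRequest.getParentID, which walks the key's path components down from the bucket's object ID. A simplified, self-contained model of that walk, with a plain map standing in for the directory table:

    import java.util.HashMap;
    import java.util.Map;

    public final class ParentIdWalk {
      // Maps "parentId/dirName" -> objectId, mimicking the directory table.
      static final Map<String, Long> DIR_TABLE = new HashMap<>();

      static long getParentId(long bucketId, String keyName) {
        String[] parts = keyName.split("/");
        long current = bucketId;
        // Walk every component except the last one (the file name itself).
        for (int i = 0; i < parts.length - 1; i++) {
          Long next = DIR_TABLE.get(current + "/" + parts[i]);
          if (next == null) {
            throw new IllegalStateException("Missing directory: " + parts[i]);
          }
          current = next;
        }
        return current;
      }

      public static void main(String[] args) {
        DIR_TABLE.put("512/a", 1024L);
        DIR_TABLE.put("1024/b", 1027L);
        System.out.println(getParentId(512L, "a/b/file1")); // prints 1027
      }
    }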
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 3216bcf..b673296 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
+import com.google.common.base.Optional;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.StringUtils;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -27,6 +31,8 @@
 
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -51,16 +57,12 @@
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-
-import com.google.common.base.Optional;
-import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
 /**
  * Handle Multipart upload complete request.
  */
@@ -141,17 +143,19 @@
       String ozoneKey = omMetadataManager.getOzoneKey(
           volumeName, bucketName, keyName);
 
+      String dbOzoneKey =
+          getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);
+
+      String dbMultipartOpenKey =
+          getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID,
+              omMetadataManager);
+
       OmMultipartKeyInfo multipartKeyInfo = omMetadataManager
           .getMultipartInfoTable().get(multipartKey);
 
       // Check for directory exists with same name, if it exists throw error. 
-      if (ozoneManager.getEnableFileSystemPaths()) {
-        if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
-            omMetadataManager)) {
-          throw new OMException("Can not Complete MPU for file: " + keyName +
-              " as there is already directory in the given path", NOT_A_FILE);
-        }
-      }
+      checkDirectoryAlreadyExists(ozoneManager, volumeName, bucketName, keyName,
+          omMetadataManager);
 
       if (multipartKeyInfo == null) {
         throw new OMException(
@@ -172,130 +176,25 @@
         }
 
         // First Check for Invalid Part Order.
-        int prevPartNumber = partsList.get(0).getPartNumber();
         List< Integer > partNumbers = new ArrayList<>();
-        int partsListSize = partsList.size();
-        partNumbers.add(prevPartNumber);
-        for (int i = 1; i < partsListSize; i++) {
-          int currentPartNumber = partsList.get(i).getPartNumber();
-          if (prevPartNumber >= currentPartNumber) {
-            LOG.error("PartNumber at index {} is {}, and its previous " +
-                    "partNumber at index {} is {} for ozonekey is " +
-                    "{}", i, currentPartNumber, i - 1, prevPartNumber,
-                ozoneKey);
-            throw new OMException(
-                failureMessage(requestedVolume, requestedBucket, keyName) +
-                " because parts are in Invalid order.",
-                OMException.ResultCodes.INVALID_PART_ORDER);
-          }
-          prevPartNumber = currentPartNumber;
-          partNumbers.add(prevPartNumber);
-        }
-
+        int partsListSize = getPartsListSize(requestedVolume,
+                requestedBucket, keyName, ozoneKey, partNumbers, partsList);
 
         List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
-        long dataSize = 0;
-        int currentPartCount = 0;
-        // Now do actual logic, and check for any Invalid part during this.
-        for (OzoneManagerProtocolProtos.Part part : partsList) {
-          currentPartCount++;
-          int partNumber = part.getPartNumber();
-          String partName = part.getPartName();
-
-          PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
-
-          if (partKeyInfo == null ||
-              !partName.equals(partKeyInfo.getPartName())) {
-            String omPartName = partKeyInfo == null ? null :
-                partKeyInfo.getPartName();
-            throw new OMException(
-                failureMessage(requestedVolume, requestedBucket, keyName) +
-                ". Provided Part info is { " + partName + ", " + partNumber +
-                "}, whereas OM has partName " + omPartName,
-                OMException.ResultCodes.INVALID_PART);
-          }
-
-          OmKeyInfo currentPartKeyInfo = OmKeyInfo
-              .getFromProtobuf(partKeyInfo.getPartKeyInfo());
-
-          // Except for last part all parts should have minimum size.
-          if (currentPartCount != partsListSize) {
-            if (currentPartKeyInfo.getDataSize() <
-                ozoneManager.getMinMultipartUploadPartSize()) {
-              LOG.error("MultipartUpload: {} Part number: {} size {}  is less" +
-                      " than minimum part size {}", ozoneKey,
-                  partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
-                  ozoneManager.getMinMultipartUploadPartSize());
-              throw new OMException(
-                  failureMessage(requestedVolume, requestedBucket, keyName) +
-                  ". Entity too small.",
-                  OMException.ResultCodes.ENTITY_TOO_SMALL);
-            }
-          }
-
-          // As all part keys will have only one version.
-          OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
-              .getKeyLocationVersions().get(0);
-
-          // Set partNumber in each block.
-          currentKeyInfoGroup.getLocationList().forEach(
-              omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
-
-          partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
-          dataSize += currentPartKeyInfo.getDataSize();
-        }
+        long dataSize = getMultipartDataSize(requestedVolume, requestedBucket,
+                keyName, ozoneKey, partKeyInfoMap, partsListSize,
+                partLocationInfos, partsList, ozoneManager);
 
         // All parts have same replication information. Here getting from last
         // part.
-        HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
-            .getPartKeyInfo().getType();
-        HddsProtos.ReplicationFactor factor =
-            partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
-
-        OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
-        if (omKeyInfo == null) {
-          // This is a newly added key, it does not have any versions.
-          OmKeyLocationInfoGroup keyLocationInfoGroup = new
-              OmKeyLocationInfoGroup(0, partLocationInfos, true);
-
-          // Get the objectID of the key from OpenKeyTable
-          OmKeyInfo dbOpenKeyInfo = omMetadataManager.getOpenKeyTable()
-              .get(multipartKey);
-
-          // A newly created key, this is the first version.
-          OmKeyInfo.Builder builder =
-              new OmKeyInfo.Builder().setVolumeName(volumeName)
-              .setBucketName(bucketName).setKeyName(keyName)
-              .setReplicationConfig(
-                      ReplicationConfig.fromTypeAndFactor(type, factor))
-              .setCreationTime(keyArgs.getModificationTime())
-              .setModificationTime(keyArgs.getModificationTime())
-              .setDataSize(dataSize)
-              .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
-              .setOmKeyLocationInfos(
-                  Collections.singletonList(keyLocationInfoGroup))
-              .setAcls(dbOpenKeyInfo.getAcls());
-          // Check if db entry has ObjectID. This check is required because
-          // it is possible that between multipart key uploads and complete,
-          // we had an upgrade.
-          if (dbOpenKeyInfo.getObjectID() != 0) {
-            builder.setObjectID(dbOpenKeyInfo.getObjectID());
-          }
-          omKeyInfo = builder.build();
-        } else {
-          // Already a version exists, so we should add it as a new version.
-          // But now as versioning is not supported, just following the commit
-          // key approach. When versioning support comes, then we can uncomment
-          // below code keyInfo.addNewVersion(locations);
-          omKeyInfo.updateLocationInfoList(partLocationInfos, true, true);
-          omKeyInfo.setModificationTime(keyArgs.getModificationTime());
-          omKeyInfo.setDataSize(dataSize);
-        }
-        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+        OmKeyInfo omKeyInfo =
+            getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs, volumeName,
+                bucketName, keyName, dbMultipartOpenKey, omMetadataManager,
+                dbOzoneKey, partKeyInfoMap, partLocationInfos, dataSize);
 
         //Find all unused parts.
-        List< OmKeyInfo > unUsedParts = new ArrayList<>();
-        for (Map.Entry< Integer, PartKeyInfo > partKeyInfo :
+        List<OmKeyInfo> unUsedParts = new ArrayList<>();
+        for (Map.Entry<Integer, PartKeyInfo> partKeyInfo :
             partKeyInfoMap.entrySet()) {
           if (!partNumbers.contains(partKeyInfo.getKey())) {
             unUsedParts.add(OmKeyInfo
@@ -303,8 +202,8 @@
           }
         }
 
-        updateCache(omMetadataManager, ozoneKey, multipartKey, omKeyInfo,
-            trxnLogIndex);
+        updateCache(omMetadataManager, dbOzoneKey, dbMultipartOpenKey,
+            multipartKey, omKeyInfo, trxnLogIndex);
 
         omResponse.setCompleteMultiPartUploadResponse(
             MultipartUploadCompleteResponse.newBuilder()
@@ -313,8 +212,9 @@
                 .setKey(keyName)
                 .setHash(DigestUtils.sha256Hex(keyName)));
 
-        omClientResponse = new S3MultipartUploadCompleteResponse(
-            omResponse.build(), multipartKey, omKeyInfo, unUsedParts);
+        omClientResponse =
+            getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey,
+                omKeyInfo, unUsedParts);
 
         result = Result.SUCCESS;
       } else {
@@ -327,8 +227,7 @@
     } catch (IOException ex) {
       result = Result.FAILURE;
       exception = ex;
-      omClientResponse = new S3MultipartUploadCompleteResponse(
-          createErrorOMResponse(omResponse, exception));
+      omClientResponse = getOmClientResponse(omResponse, exception);
     } finally {
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
@@ -338,6 +237,46 @@
       }
     }
 
+    logResult(ozoneManager, multipartUploadCompleteRequest, partsList,
+            auditMap, volumeName, bucketName, keyName, exception, result);
+
+    return omClientResponse;
+  }
+
+  protected S3MultipartUploadCompleteResponse getOmClientResponse(
+      OMResponse.Builder omResponse, IOException exception) {
+    return new S3MultipartUploadCompleteResponse(
+        createErrorOMResponse(omResponse, exception));
+  }
+
+  protected OMClientResponse getOmClientResponse(String multipartKey,
+      OMResponse.Builder omResponse, String dbMultipartOpenKey,
+      OmKeyInfo omKeyInfo, List<OmKeyInfo> unUsedParts) {
+
+    return new S3MultipartUploadCompleteResponse(omResponse.build(),
+        multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts);
+  }
+
+  protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+    if (ozoneManager.getEnableFileSystemPaths()) {
+      if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
+              omMetadataManager)) {
+        throw new OMException("Can not Complete MPU for file: " + keyName +
+                " as there is already directory in the given path",
+                NOT_A_FILE);
+      }
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected void logResult(OzoneManager ozoneManager,
+      MultipartUploadCompleteRequest multipartUploadCompleteRequest,
+      List<OzoneManagerProtocolProtos.Part> partsList,
+      Map<String, String> auditMap, String volumeName,
+      String bucketName, String keyName, IOException exception,
+      Result result) {
     auditMap.put(OzoneConsts.MULTIPART_LIST, partsList.toString()
         .replaceAll("\\n", " "));
 
@@ -361,8 +300,185 @@
       LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}",
           multipartUploadCompleteRequest);
     }
+  }
 
-    return omClientResponse;
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex,
+      KeyArgs keyArgs, String volumeName, String bucketName, String keyName,
+      String multipartOpenKey, OMMetadataManager omMetadataManager,
+      String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+      List<OmKeyLocationInfo> partLocationInfos, long dataSize)
+          throws IOException {
+    HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
+        .getPartKeyInfo().getType();
+    HddsProtos.ReplicationFactor factor =
+        partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
+
+    OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName,
+            omMetadataManager);
+    if (omKeyInfo == null) {
+      // This is a newly added key, it does not have any versions.
+      OmKeyLocationInfoGroup keyLocationInfoGroup = new
+          OmKeyLocationInfoGroup(0, partLocationInfos, true);
+
+      // Get the objectID of the key from OpenKeyTable
+      OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartOpenKey,
+              keyName, omMetadataManager);
+
+      // A newly created key, this is the first version.
+      OmKeyInfo.Builder builder =
+          new OmKeyInfo.Builder().setVolumeName(volumeName)
+          .setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
+          .setReplicationConfig(
+              ReplicationConfig.fromTypeAndFactor(type, factor))
+          .setCreationTime(keyArgs.getModificationTime())
+          .setModificationTime(keyArgs.getModificationTime())
+          .setDataSize(dataSize)
+          .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
+          .setOmKeyLocationInfos(
+              Collections.singletonList(keyLocationInfoGroup))
+          .setAcls(dbOpenKeyInfo.getAcls());
+      // Check if db entry has ObjectID. This check is required because
+      // it is possible that between multipart key uploads and complete,
+      // we had an upgrade.
+      if (dbOpenKeyInfo.getObjectID() != 0) {
+        builder.setObjectID(dbOpenKeyInfo.getObjectID());
+      }
+      updatePrefixFSOInfo(dbOpenKeyInfo, builder);
+      omKeyInfo = builder.build();
+    } else {
+      // Already a version exists, so we should add it as a new version.
+      // But now as versioning is not supported, just following the commit
+      // key approach. When versioning support comes, then we can uncomment
+      // below code keyInfo.addNewVersion(locations);
+      omKeyInfo.updateLocationInfoList(partLocationInfos, true, true);
+      omKeyInfo.setModificationTime(keyArgs.getModificationTime());
+      omKeyInfo.setDataSize(dataSize);
+    }
+    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+    return omKeyInfo;
+  }
+
+  protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+      OmKeyInfo.Builder builder) {
+    // FSO is disabled. Do nothing.
+  }
+
+  protected String getDBOzoneKey(OMMetadataManager omMetadataManager,
+      String volumeName, String bucketName, String keyName) throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
+  }
+
+  protected String getDBMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String uploadID, OMMetadataManager omMetadataManager)
+      throws IOException {
+    return omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, uploadID);
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return omMetadataManager.getKeyTable().get(dbOzoneKey);
+  }
+
+  protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return omMetadataManager.getOpenKeyTable().get(dbMultipartKey);
+  }
+
+  protected void addKeyTableCacheEntry(OMMetadataManager omMetadataManager,
+      String dbOzoneKey, OmKeyInfo omKeyInfo, long transactionLogIndex) {
+
+    // Add key entry to file table.
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(dbOzoneKey),
+        new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
+  }
+
+  private int getPartsListSize(String requestedVolume,
+      String requestedBucket, String keyName, String ozoneKey,
+      List<Integer> partNumbers,
+      List<OzoneManagerProtocolProtos.Part> partsList) throws OMException {
+    int prevPartNumber = partsList.get(0).getPartNumber();
+    int partsListSize = partsList.size();
+    partNumbers.add(prevPartNumber);
+    for (int i = 1; i < partsListSize; i++) {
+      int currentPartNumber = partsList.get(i).getPartNumber();
+      if (prevPartNumber >= currentPartNumber) {
+        LOG.error("PartNumber at index {} is {}, and its previous " +
+                "partNumber at index {} is {} for ozonekey is " +
+                "{}", i, currentPartNumber, i - 1, prevPartNumber,
+            ozoneKey);
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            " because parts are in Invalid order.",
+            OMException.ResultCodes.INVALID_PART_ORDER);
+      }
+      prevPartNumber = currentPartNumber;
+      partNumbers.add(prevPartNumber);
+    }
+    return partsListSize;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private long getMultipartDataSize(String requestedVolume,
+      String requestedBucket, String keyName, String ozoneKey,
+      TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+      int partsListSize, List<OmKeyLocationInfo> partLocationInfos,
+      List<OzoneManagerProtocolProtos.Part> partsList,
+      OzoneManager ozoneManager) throws OMException {
+    long dataSize = 0;
+    int currentPartCount = 0;
+    // Now do actual logic, and check for any Invalid part during this.
+    for (OzoneManagerProtocolProtos.Part part : partsList) {
+      currentPartCount++;
+      int partNumber = part.getPartNumber();
+      String partName = part.getPartName();
+
+      PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
+
+      String dbPartName = null;
+      if (partKeyInfo != null) {
+        dbPartName = partKeyInfo.getPartName();
+      }
+      if (!StringUtils.equals(partName, dbPartName)) {
+        String omPartName = partKeyInfo == null ? null : dbPartName;
+        throw new OMException(
+            failureMessage(requestedVolume, requestedBucket, keyName) +
+            ". Provided Part info is { " + partName + ", " + partNumber +
+            "}, whereas OM has partName " + omPartName,
+            OMException.ResultCodes.INVALID_PART);
+      }
+
+      OmKeyInfo currentPartKeyInfo = OmKeyInfo
+          .getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+      // Except for last part all parts should have minimum size.
+      if (currentPartCount != partsListSize) {
+        if (currentPartKeyInfo.getDataSize() <
+            ozoneManager.getMinMultipartUploadPartSize()) {
+          LOG.error("MultipartUpload: {} Part number: {} size {}  is less" +
+                  " than minimum part size {}", ozoneKey,
+              partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
+              ozoneManager.getMinMultipartUploadPartSize());
+          throw new OMException(
+              failureMessage(requestedVolume, requestedBucket, keyName) +
+                  ". Entity too small.",
+              OMException.ResultCodes.ENTITY_TOO_SMALL);
+        }
+      }
+
+      // As all part keys will have only one version.
+      OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
+          .getKeyLocationVersions().get(0);
+
+      // Set partNumber in each block.
+      currentKeyInfoGroup.getLocationList().forEach(
+          omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
+
+      partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
+      dataSize += currentPartKeyInfo.getDataSize();
+    }
+    return dataSize;
   }
 
   private static String failureMessage(String volume, String bucket,
@@ -372,20 +488,19 @@
   }
 
   private void updateCache(OMMetadataManager omMetadataManager,
-      String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo,
-      long transactionLogIndex) {
+      String dbOzoneKey, String dbMultipartOpenKey, String dbMultipartKey,
+      OmKeyInfo omKeyInfo, long transactionLogIndex) {
     // Update cache.
     // 1. Add key entry to key table.
     // 2. Delete multipartKey entry from openKeyTable and multipartInfo table.
-    omMetadataManager.getKeyTable().addCacheEntry(
-        new CacheKey<>(ozoneKey),
-        new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
+    addKeyTableCacheEntry(omMetadataManager, dbOzoneKey, omKeyInfo,
+        transactionLogIndex);
 
     omMetadataManager.getOpenKeyTable().addCacheEntry(
-        new CacheKey<>(multipartKey),
+        new CacheKey<>(dbMultipartOpenKey),
         new CacheValue<>(Optional.absent(), transactionLogIndex));
     omMetadataManager.getMultipartInfoTable().addCacheEntry(
-        new CacheKey<>(multipartKey),
+        new CacheKey<>(dbMultipartKey),
         new CacheValue<>(Optional.absent(), transactionLogIndex));
   }
 }
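
[Editor's note] updateCache above has to leave a consistent picture across three tables: the finished key is published to the key/file table while the open-key and multipart-info entries are tombstoned at the same transaction index (the real caches store absent-value entries tagged with the index rather than deleting outright). A toy model of the invariant, with plain maps and hard removes standing in for the tombstones:

    import java.util.HashMap;
    import java.util.Map;

    public final class CompleteCacheUpdate {
      static void updateCache(Map<String, String> keyTable,
          Map<String, String> openKeyTable,
          Map<String, String> multipartInfoTable, String dbOzoneKey,
          String dbMultipartOpenKey, String dbMultipartKey, String keyInfo) {
        keyTable.put(dbOzoneKey, keyInfo);          // 1. publish the final key
        openKeyTable.remove(dbMultipartOpenKey);    // 2. drop the open entry
        multipartInfoTable.remove(dbMultipartKey);  // 3. drop the upload record
      }

      public static void main(String[] args) {
        Map<String, String> keys = new HashMap<>();
        Map<String, String> open = new HashMap<>();
        Map<String, String> mpu = new HashMap<>();
        open.put("1027/file1/uid-1", "openInfo");
        mpu.put("1027/file1/uid-1", "mpuInfo");
        updateCache(keys, open, mpu, "1027/file1", "1027/file1/uid-1",
            "1027/file1/uid-1", "keyInfo");
        System.out.println(keys + " " + open + " " + mpu);
      }
    }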
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
new file mode 100644
index 0000000..12539dc
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse;
+import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
+
+/**
+ * Handles a multipart upload complete request - prefix layout.
+ */
+public class S3MultipartUploadCompleteRequestWithFSO
+        extends S3MultipartUploadCompleteRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3MultipartUploadCompleteRequestWithFSO.class);
+
+  public S3MultipartUploadCompleteRequestWithFSO(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager,
+      String volumeName, String bucketName, String keyName,
+      OMMetadataManager omMetadataManager) throws IOException {
+
+    Path keyPath = Paths.get(keyName);
+    OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
+        OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+            volumeName, bucketName, keyName, keyPath);
+    // Check for directory exists with same name, if it exists throw error.
+    if (pathInfoFSO.getDirectoryResult() == DIRECTORY_EXISTS) {
+      throw new OMException("Can not Complete MPU for file: " + keyName +
+          " as there is already directory in the given path",
+          NOT_A_FILE);
+    }
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return OMFileRequest.getOmKeyInfoFromFileTable(false,
+            omMetadataManager, dbOzoneFileKey, keyName);
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfoFromOpenKeyTable(String dbMultipartKey,
+      String keyName, OMMetadataManager omMetadataManager) throws IOException {
+    return OMFileRequest.getOmKeyInfoFromFileTable(true,
+            omMetadataManager, dbMultipartKey, keyName);
+  }
+
+  @Override
+  protected void addKeyTableCacheEntry(OMMetadataManager omMetadataManager,
+      String ozoneKey, OmKeyInfo omKeyInfo, long transactionLogIndex) {
+
+    // Add key entry to file table.
+    OMFileRequest.addFileTableCacheEntry(omMetadataManager, ozoneKey, omKeyInfo,
+        omKeyInfo.getFileName(), transactionLogIndex);
+  }
+
+  @Override
+  protected void updatePrefixFSOInfo(OmKeyInfo dbOpenKeyInfo,
+                                     OmKeyInfo.Builder builder) {
+    // updates parentID and fileName
+    builder.setParentObjectID(dbOpenKeyInfo.getParentObjectID());
+    builder.setFileName(dbOpenKeyInfo.getFileName());
+  }
+
+  @Override
+  protected String getDBOzoneKey(OMMetadataManager omMetadataManager,
+      String volumeName, String bucketName, String keyName)throws IOException {
+
+    long parentId =
+        getParentId(omMetadataManager, volumeName, bucketName, keyName);
+
+    String fileName = keyName;
+    Path filePath = Paths.get(keyName).getFileName();
+    if (filePath != null) {
+      fileName = filePath.toString();
+    }
+
+    return omMetadataManager.getOzonePathKey(parentId, fileName);
+  }
+
+  @Override
+  protected String getDBMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String uploadID, OMMetadataManager omMetadataManager)
+      throws IOException {
+
+    long parentId =
+        getParentId(omMetadataManager, volumeName, bucketName, keyName);
+
+    String fileName = keyName;
+    Path filePath = Paths.get(keyName).getFileName();
+    if (filePath != null) {
+      fileName = filePath.toString();
+    }
+
+    return omMetadataManager.getMultipartKey(parentId, fileName, uploadID);
+  }
+
+  @Override
+  protected S3MultipartUploadCompleteResponse getOmClientResponse(
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      IOException exception) {
+
+    return new S3MultipartUploadCompleteResponseWithFSO(
+        createErrorOMResponse(omResponse, exception));
+  }
+
+  @Override
+  protected OMClientResponse getOmClientResponse(String multipartKey,
+      OzoneManagerProtocolProtos.OMResponse.Builder omResponse,
+      String dbMultipartOpenKey, OmKeyInfo omKeyInfo,
+      List<OmKeyInfo> unUsedParts) {
+
+    return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(),
+        multipartKey, dbMultipartOpenKey, omKeyInfo, unUsedParts);
+  }
+
+  private long getParentId(OMMetadataManager omMetadataManager,
+      String volumeName, String bucketName, String keyName) throws IOException {
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketId = omBucketInfo.getObjectID();
+    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+    return OMFileRequest
+        .getParentID(bucketId, pathComponents, keyName, omMetadataManager);
+  }
+}
+
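
[Editor's note] Both getDBOzoneKey and getDBMultipartOpenKey above reduce the key name to its last path component before combining it with the resolved parent ID. The java.nio extraction they share is worth seeing in isolation; a small runnable check:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class FileNameExtraction {
      static String fileName(String keyName) {
        String fileName = keyName;
        // getFileName() returns null for root-like paths, hence the guard.
        Path filePath = Paths.get(keyName).getFileName();
        if (filePath != null) {
          fileName = filePath.toString(); // last component, e.g. "file1"
        }
        return fileName;
      }

      public static void main(String[] args) {
        System.out.println(fileName("a/b/file1")); // file1
        System.out.println(fileName("file1"));     // file1
      }
    }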
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java
new file mode 100644
index 0000000..be5275f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseWithFSO.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+
+/**
+ * Response for create directory request.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE})
+public class OMDirectoryCreateResponseWithFSO extends OMClientResponse {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OMDirectoryCreateResponseWithFSO.class);
+
+  private OmDirectoryInfo dirInfo;
+  private List<OmDirectoryInfo> parentDirInfos;
+  private Result result;
+
+  public OMDirectoryCreateResponseWithFSO(@Nonnull OMResponse omResponse,
+                                     @Nonnull OmDirectoryInfo dirInfo,
+                                     @Nonnull List<OmDirectoryInfo> pDirInfos,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.dirInfo = dirInfo;
+    this.parentDirInfos = pDirInfos;
+    this.result = result;
+  }
+
+  /**
+   * For when the request is not successful or the directory already exists.
+   */
+  public OMDirectoryCreateResponseWithFSO(@Nonnull OMResponse omResponse,
+                                     @Nonnull Result result) {
+    super(omResponse);
+    this.result = result;
+  }
+
+  @Override
+  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+                              BatchOperation batchOperation)
+          throws IOException {
+    addToDirectoryTable(omMetadataManager, batchOperation);
+  }
+
+  private void addToDirectoryTable(OMMetadataManager omMetadataManager,
+                                BatchOperation batchOperation)
+          throws IOException {
+    if (dirInfo != null) {
+      if (parentDirInfos != null) {
+        for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+          String parentKey = omMetadataManager
+                  .getOzonePathKey(parentDirInfo.getParentObjectID(),
+                          parentDirInfo.getName());
+          LOG.debug("putWithBatch parent : dir {} info : {}", parentKey,
+                  parentDirInfo);
+          omMetadataManager.getDirectoryTable()
+                  .putWithBatch(batchOperation, parentKey, parentDirInfo);
+        }
+      }
+
+      String dirKey = omMetadataManager.getOzonePathKey(
+              dirInfo.getParentObjectID(), dirInfo.getName());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, dirKey,
+              dirInfo);
+    } else {
+      // When directory already exists, we don't add it to cache. And it is
+      // not an error, in this case dirKeyInfo will be null.
+      LOG.debug("Response Status is OK, dirKeyInfo is null in " +
+              "OMDirectoryCreateResponseWithFSO");
+    }
+  }
+}
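
[Editor's note] addToDirectoryTable above writes missing ancestor directories before the leaf inside a single batch, so once the batch commits a reader can never observe a child entry without its parents. In miniature, with a list standing in for the RocksDB batch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public final class DirBatchOrder {
      public static void main(String[] args) {
        List<String> missingParents = Arrays.asList("512/a", "1024/b");
        String leaf = "1027/c";

        // The batch is atomic: either all entries appear or none do.
        List<String> batch = new ArrayList<>();
        for (String parent : missingParents) {
          batch.add("PUT dir " + parent);  // ancestors first
        }
        batch.add("PUT dir " + leaf);      // leaf last
        batch.forEach(System.out::println);
      }
    }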
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java
new file mode 100644
index 0000000..42c256c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseWithFSO.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for create file request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
+public class OMFileCreateResponseWithFSO extends OMFileCreateResponse {
+
+  private List<OmDirectoryInfo> parentDirInfos;
+
+  public OMFileCreateResponseWithFSO(@Nonnull OMResponse omResponse,
+                                @Nonnull OmKeyInfo omKeyInfo,
+                                @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                                long openKeySessionID,
+                                @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
+        omBucketInfo);
+    this.parentDirInfos = parentDirInfos;
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMFileCreateResponseWithFSO(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataMgr,
+                              BatchOperation batchOp) throws IOException {
+
+    /*
+     * Create parent directory entries during Key Create - do not wait
+     * for Key Commit request.
+     * XXX handle stale directory entries.
+     */
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("putWithBatch adding parent : key {} info : {}", parentKey,
+                  parentDirInfo);
+        }
+        omMetadataMgr.getDirectoryTable().putWithBatch(batchOp, parentKey,
+                parentDirInfo);
+      }
+    }
+
+    OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, getOmKeyInfo(),
+            getOpenKeySessionID());
+
+    // update bucket usedBytes.
+    omMetadataMgr.getBucketTable().putWithBatch(batchOp,
+            omMetadataMgr.getBucketKey(getOmKeyInfo().getVolumeName(),
+                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java
index 9392f7e..51118c3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java
@@ -100,6 +100,50 @@
     }
   }
 
+  /**
+   * This method is used for FSO file deletes. Since a common deletedTable
+   * is used, the key must be added in the full format (vol/buck/key).
+   * This method deletes the key from the file table (whose keys are in
+   * prefix format) and adds the full key to the deletedTable.
+   * @param keyName     (format: objectId/key)
+   * @param fullKeyName (format: vol/buck/key)
+   * @param omKeyInfo   info of the key being moved to the deletedTable
+   * @throws IOException
+   */
+  protected void addDeletionToBatch(
+      OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation,
+      Table<String, ?> fromTable,
+      String keyName, String fullKeyName,
+      OmKeyInfo omKeyInfo) throws IOException {
+
+    // For an OmResponse with failure this should do nothing; this method
+    // is not called in the failure scenario in OM code.
+    fromTable.deleteWithBatch(batchOperation, keyName);
+
+    // If the key is not empty, add it to the deleted table.
+    if (!isKeyEmpty(omKeyInfo)) {
+      // If a deleted key is put in the table where a key with the same
+      // name already exists, then the old deleted key information would be
+      // lost. To avoid this, first check if a key with same name exists.
+      // deletedTable in OM Metadata stores <KeyName, RepeatedOMKeyInfo>.
+      // The RepeatedOmKeyInfo is the structure that allows us to store a
+      // list of OmKeyInfo that can be tied to same key name. For a keyName
+      // if RepeatedOMKeyInfo structure is null, we create a new instance,
+      // if it is not null, then we simply add to the list and store this
+      // instance in deletedTable.
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          omMetadataManager.getDeletedTable().get(fullKeyName);
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(),
+          isRatisEnabled);
+      omMetadataManager.getDeletedTable().putWithBatch(
+          batchOperation, fullKeyName, repeatedOmKeyInfo);
+    }
+  }
+
   @Override
   public abstract void addToDBBatch(OMMetadataManager omMetadataManager,
         BatchOperation batchOperation) throws IOException;
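
[Editor's note] The read-modify-write on the deletedTable above exists because several deleted generations of the same vol/buck/key may accumulate; each new deletion is appended to the existing RepeatedOmKeyInfo rather than overwriting it. A minimal model of that append behavior, using a list of strings in place of the repeated structure:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class DeletedTableAppend {
      static final Map<String, List<String>> DELETED_TABLE = new HashMap<>();

      static void addDeletion(String fullKeyName, String keyInfo) {
        // Append rather than put, so earlier deleted versions are preserved.
        DELETED_TABLE.computeIfAbsent(fullKeyName, k -> new ArrayList<>())
            .add(keyInfo);
      }

      public static void main(String[] args) {
        addDeletion("/vol1/buck1/file1", "gen-1");
        addDeletion("/vol1/buck1/file1", "gen-2");
        System.out.println(DELETED_TABLE); // both generations retained
      }
    }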
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
index 4b20853..c97d702 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java
@@ -74,4 +74,16 @@
         omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
             omKeyInfo.getBucketName()), omBucketInfo);
   }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected long getClientID() {
+    return clientID;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java
new file mode 100644
index 0000000..9f2a800
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponseWithFSO.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for AllocateBlock request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE})
+public class OMAllocateBlockResponseWithFSO extends OMAllocateBlockResponse {
+
+  public OMAllocateBlockResponseWithFSO(@Nonnull OMResponse omResponse,
+      @Nonnull OmKeyInfo omKeyInfo, long clientID,
+      @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, clientID, omBucketInfo);
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMAllocateBlockResponseWithFSO(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo(), getClientID());
+
+    // update bucket usedBytes.
+    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+            omMetadataManager.getBucketKey(getOmKeyInfo().getVolumeName(),
+                    getOmKeyInfo().getBucketName()), getOmBucketInfo());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index 5d43b27..ebd3263 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -79,4 +79,19 @@
             omBucketInfo.getBucketName()), omBucketInfo);
   }
 
+  protected String getOpenKeyName() {
+    return openKeyName;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
+
+  protected String getOzoneKeyName() {
+    return ozoneKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java
new file mode 100644
index 0000000..3b8bba1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseWithFSO.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for CommitKey request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE})
+public class OMKeyCommitResponseWithFSO extends OMKeyCommitResponse {
+
+  public OMKeyCommitResponseWithFSO(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               String ozoneKeyName, String openKeyName,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, ozoneKeyName, openKeyName,
+            omBucketInfo);
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyCommitResponseWithFSO(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+    checkStatusNotOK();
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+                           BatchOperation batchOperation) throws IOException {
+
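+    // Commit moves the entry from the open-file table to the file table.
+    // Both tables use parent-scoped keys of the form
+    // "<parentId>/<fileName>" (illustrative; see
+    // OMMetadataManager#getOzonePathKey), so the move is two point
+    // operations inside one RocksDB batch.
+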
+    // Delete from OpenKey table
+    omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
+            getOpenKeyName());
+
+    OMFileRequest.addToFileTable(omMetadataManager, batchOperation,
+            getOmKeyInfo());
+
+    // update bucket usedBytes.
+    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+            omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 98b1927..d170ef4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -69,7 +69,7 @@
   }
 
   @Override
-  protected void addToDBBatch(OMMetadataManager omMetadataManager,
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
     /**
@@ -101,5 +101,17 @@
         omMetadataManager.getBucketKey(omKeyInfo.getVolumeName(),
             omKeyInfo.getBucketName()), omBucketInfo);
   }
+
+  protected long getOpenKeySessionID() {
+    return openKeySessionID;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java
new file mode 100644
index 0000000..f9f9567
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponseWithFSO.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for CreateKey request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE})
+public class OMKeyCreateResponseWithFSO extends OMFileCreateResponseWithFSO {
+
+  public OMKeyCreateResponseWithFSO(@Nonnull OMResponse omResponse,
+                               @Nonnull OmKeyInfo omKeyInfo,
+                               @Nonnull List<OmDirectoryInfo> parentDirInfos,
+                               long openKeySessionID,
+                               @Nonnull OmBucketInfo omBucketInfo) {
+    super(omResponse, omKeyInfo, parentDirInfos, openKeySessionID,
+            omBucketInfo);
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyCreateResponseWithFSO(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index 58785c0..868d8c9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -75,4 +75,12 @@
         omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(),
             omBucketInfo.getBucketName()), omBucketInfo);
   }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java
new file mode 100644
index 0000000..8477653
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseWithFSO.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for DeleteKey request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE,
+    DELETED_TABLE, DELETED_DIR_TABLE})
+public class OMKeyDeleteResponseWithFSO extends OMKeyDeleteResponse {
+
+  private boolean isDeleteDirectory;
+  private String keyName;
+
+  public OMKeyDeleteResponseWithFSO(@Nonnull OMResponse omResponse,
+      @Nonnull String keyName, @Nonnull OmKeyInfo omKeyInfo,
+      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo,
+      boolean isDeleteDirectory) {
+    super(omResponse, omKeyInfo, isRatisEnabled, omBucketInfo);
+    this.keyName = keyName;
+    this.isDeleteDirectory = isDeleteDirectory;
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyDeleteResponseWithFSO(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    // For a failed OMResponse this method is never invoked: OM applies
+    // the DB batch only for successful responses.
+    String ozoneDbKey = omMetadataManager.getOzonePathKey(
+            getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName());
+
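+    // A directory is not removed recursively here: its entry moves to the
+    // deletedDirTable and the background directory deleting service later
+    // purges the whole sub-tree. A plain file goes straight to the
+    // deletedTable, as in the simple layout.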
+    if (isDeleteDirectory) {
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+              ozoneDbKey);
+      OmKeyInfo omKeyInfo = getOmKeyInfo();
+      // Set the full absolute key name on OmKeyInfo; it is required
+      // when handing the sub-tree over to the KeyDeletingService.
+      omKeyInfo.setKeyName(keyName);
+      omMetadataManager.getDeletedDirTable().putWithBatch(
+          batchOperation, ozoneDbKey, omKeyInfo);
+    } else {
+      Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable();
+      OmKeyInfo omKeyInfo = getOmKeyInfo();
+      // Set the full absolute key name on OmKeyInfo; it is required
+      // when handing the file over to the KeyDeletingService.
+      omKeyInfo.setKeyName(keyName);
+      String deletedKey = omMetadataManager
+          .getOzoneKey(omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
+              omKeyInfo.getKeyName());
+      addDeletionToBatch(omMetadataManager, batchOperation, keyTable,
+          ozoneDbKey, deletedKey, omKeyInfo);
+    }
+
+    // update bucket usedBytes.
+    omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+            omMetadataManager.getBucketKey(getOmBucketInfo().getVolumeName(),
+                    getOmBucketInfo().getBucketName()), getOmBucketInfo());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index 7470b37..3b7edf1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -70,4 +70,15 @@
         renameKeyInfo);
   }
 
+  public OmKeyInfo getRenameKeyInfo() {
+    return renameKeyInfo;
+  }
+
+  public String getFromKeyName() {
+    return fromKeyName;
+  }
+
+  public String getToKeyName() {
+    return toKeyName;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
new file mode 100644
index 0000000..f1dd69e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseWithFSO.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for RenameKey request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE})
+public class OMKeyRenameResponseWithFSO extends OMKeyRenameResponse {
+
+  private boolean isRenameDirectory;
+
+  public OMKeyRenameResponseWithFSO(@Nonnull OMResponse omResponse,
+      String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo,
+      boolean isRenameDirectory) {
+    super(omResponse, fromKeyName, toKeyName, renameKeyInfo);
+    this.isRenameDirectory = isRenameDirectory;
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyRenameResponseWithFSO(@Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+                           BatchOperation batchOperation) throws IOException {
+
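+    // Rename in the prefix layout is a single delete + put of one table
+    // entry, regardless of the size of the sub-tree: children reference
+    // their parent by objectID and are untouched by the parent's key
+    // change. This is what makes directory rename atomic and O(1).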
+    if (isRenameDirectory) {
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+              getFromKeyName());
+
+      OmDirectoryInfo renameDirInfo =
+              OMFileRequest.getDirectoryInfo(getRenameKeyInfo());
+      omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+              getToKeyName(), renameDirInfo);
+
+    } else {
+      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+              getFromKeyName());
+      omMetadataManager.getKeyTable().putWithBatch(batchOperation,
+              getToKeyName(), getRenameKeyInfo());
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMPathsPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMPathsPurgeResponseWithFSO.java
new file mode 100644
index 0000000..0548039
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMPathsPurgeResponseWithFSO.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.key.OMPathsPurgeRequestWithFSO;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for {@link OMPathsPurgeRequestWithFSO} request.
+ */
+@CleanupTableInfo(cleanupTables = {DELETED_TABLE, DELETED_DIR_TABLE,
+    DIRECTORY_TABLE, FILE_TABLE})
+public class OMPathsPurgeResponseWithFSO extends OMClientResponse {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMPathsPurgeResponseWithFSO.class);
+
+  private List<OzoneManagerProtocolProtos.KeyInfo> markDeletedDirList;
+  private List<String> dirList;
+  private List<OzoneManagerProtocolProtos.KeyInfo> fileList;
+  private boolean isRatisEnabled;
+
+  public OMPathsPurgeResponseWithFSO(@Nonnull OMResponse omResponse,
+      @Nonnull List<OzoneManagerProtocolProtos.KeyInfo> markDeletedDirs,
+      @Nonnull List<OzoneManagerProtocolProtos.KeyInfo> files,
+      @Nonnull List<String> dirs, boolean isRatisEnabled) {
+    super(omResponse);
+    this.markDeletedDirList = markDeletedDirs;
+    this.dirList = dirs;
+    this.fileList = files;
+    this.isRatisEnabled = isRatisEnabled;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
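+    // The purge batch applies three groups of changes atomically:
+    // 1) newly discovered sub-directories move from directoryTable to
+    //    deletedDirTable so they are picked up in a later iteration,
+    // 2) fully processed directories are dropped from deletedDirTable,
+    // 3) sub-files move from fileTable to deletedTable so that the
+    //    KeyDeletingService can release their blocks.
+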
+    // Add all sub-directories to deleted directory table.
+    for (OzoneManagerProtocolProtos.KeyInfo key : markDeletedDirList) {
+      OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key);
+      String ozoneDbKey = omMetadataManager.getOzonePathKey(
+          keyInfo.getParentObjectID(), keyInfo.getFileName());
+      omMetadataManager.getDeletedDirTable().putWithBatch(batchOperation,
+          ozoneDbKey, keyInfo);
+
+      omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+          ozoneDbKey);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("markDeletedDirList KeyName: {}, DBKey: {}",
+            keyInfo.getKeyName(), ozoneDbKey);
+      }
+    }
+
+    // Delete all the visited directories from deleted directory table
+    for (String key : dirList) {
+      omMetadataManager.getDeletedDirTable().deleteWithBatch(batchOperation,
+          key);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Purge Deleted Directory DBKey: {}", key);
+      }
+    }
+    for (OzoneManagerProtocolProtos.KeyInfo key : fileList) {
+      OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(key);
+      String ozoneDbKey = omMetadataManager.getOzonePathKey(
+          keyInfo.getParentObjectID(), keyInfo.getFileName());
+      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+          ozoneDbKey);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Move keyName: {} to DeletedTable DBKey: {}",
+            keyInfo.getKeyName(), ozoneDbKey);
+      }
+
+      RepeatedOmKeyInfo repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          keyInfo, null, keyInfo.getUpdateID(), isRatisEnabled);
+
+      String deletedKey = omMetadataManager
+          .getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(),
+              keyInfo.getKeyName());
+
+      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+          deletedKey, repeatedOmKeyInfo);
+    }
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
index 2bbeae0..299c063 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java
@@ -64,5 +64,8 @@
         omKeyInfo);
   }
 
+  public OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java
new file mode 100644
index 0000000..995644f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponseWithFSO.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.response.key.acl;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import javax.annotation.Nonnull;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for Key acl request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = { FILE_TABLE, DIRECTORY_TABLE })
+public class OMKeyAclResponseWithFSO extends OMKeyAclResponse {
+
+  private boolean isDirectory;
+
+  public OMKeyAclResponseWithFSO(
+      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse,
+      @Nonnull OmKeyInfo omKeyInfo, boolean isDirectory) {
+    super(omResponse, omKeyInfo);
+    this.isDirectory = isDirectory;
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public OMKeyAclResponseWithFSO(
+      @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
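+    // ACLs live on the table entry itself, so the updated info is written
+    // back to whichever table owns the path: directoryTable for a
+    // directory, fileTable for a file.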
+    String ozoneDbKey = omMetadataManager
+        .getOzonePathKey(getOmKeyInfo().getParentObjectID(),
+            getOmKeyInfo().getFileName());
+    if (isDirectory) {
+      OmDirectoryInfo dirInfo = OMFileRequest.getDirectoryInfo(getOmKeyInfo());
+      omMetadataManager.getDirectoryTable()
+          .putWithBatch(batchOperation, ozoneDbKey, dirInfo);
+    } else {
+      omMetadataManager.getKeyTable()
+          .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo());
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java
new file mode 100644
index 0000000..c655c47
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponseWithFSO.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for S3 Initiate Multipart Upload request for prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE, OPEN_FILE_TABLE,
+    MULTIPARTINFO_TABLE})
+public class S3InitiateMultipartUploadResponseWithFSO extends
+        S3InitiateMultipartUploadResponse {
+  private List<OmDirectoryInfo> parentDirInfos;
+  private String mpuDBKey;
+
+  public S3InitiateMultipartUploadResponseWithFSO(
+      @Nonnull OMResponse omResponse,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nonnull OmKeyInfo omKeyInfo, @Nonnull String mpuDBKey,
+      @Nonnull List<OmDirectoryInfo> parentDirInfos) {
+    super(omResponse, omMultipartKeyInfo, omKeyInfo);
+    this.parentDirInfos = parentDirInfos;
+    this.mpuDBKey = mpuDBKey;
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public S3InitiateMultipartUploadResponseWithFSO(
+      @Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    // Create the parent directory entries while initiating the multipart
+    // upload - do not wait for the file commit request.
+    if (parentDirInfos != null) {
+      for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+        String parentKey = parentDirInfo.getPath();
+        omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+                parentKey, parentDirInfo);
+      }
+    }
+
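+    // The open-file entry of an MPU key is keyed with the uploadID in
+    // place of a client id (illustrative: "<parentId>/<fileName>/<uploadID>"),
+    // so concurrent uploads of the same file name do not collide.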
+    OMFileRequest.addToOpenFileTable(omMetadataManager, batchOperation,
+        getOmKeyInfo(), getOmMultipartKeyInfo().getUploadID());
+
+    omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
+        mpuDBKey, getOmMultipartKeyInfo());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index d641875..1e4d395 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -49,15 +49,18 @@
 public class S3MultipartUploadAbortResponse extends OMClientResponse {
 
   private String multipartKey;
+  private String multipartOpenKey;
   private OmMultipartKeyInfo omMultipartKeyInfo;
   private boolean isRatisEnabled;
   private OmBucketInfo omBucketInfo;
 
   public S3MultipartUploadAbortResponse(@Nonnull OMResponse omResponse,
-      String multipartKey, @Nonnull OmMultipartKeyInfo omMultipartKeyInfo,
-      boolean isRatisEnabled, @Nonnull OmBucketInfo omBucketInfo) {
+      String multipartKey, String multipartOpenKey,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, boolean isRatisEnabled,
+      @Nonnull OmBucketInfo omBucketInfo) {
     super(omResponse);
     this.multipartKey = multipartKey;
+    this.multipartOpenKey = multipartOpenKey;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
     this.isRatisEnabled = isRatisEnabled;
     this.omBucketInfo = omBucketInfo;
@@ -78,7 +81,7 @@
 
     // Delete from openKey table and multipart info table.
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        multipartKey);
+        multipartOpenKey);
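+    // Note: multipartOpenKey addresses the open-key entry and is layout
+    // dependent, while multipartKey below is the layout-agnostic
+    // multipartInfoTable key.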
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
         multipartKey);
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java
new file mode 100644
index 0000000..9e43dea
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponseWithFSO.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for Multipart Abort Request - prefix layout.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
+    MULTIPARTINFO_TABLE})
+public class S3MultipartUploadAbortResponseWithFSO
+    extends S3MultipartUploadAbortResponse {
+
+  public S3MultipartUploadAbortResponseWithFSO(@Nonnull OMResponse omResponse,
+      String multipartKey, String multipartOpenKey,
+      @Nonnull OmMultipartKeyInfo omMultipartKeyInfo, boolean isRatisEnabled,
+      @Nonnull OmBucketInfo omBucketInfo) {
+
+    super(omResponse, multipartKey, multipartOpenKey, omMultipartKeyInfo,
+        isRatisEnabled, omBucketInfo);
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public S3MultipartUploadAbortResponseWithFSO(
+      @Nonnull OMResponse omResponse) {
+    super(omResponse);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index c2b119b..bcfff8b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -69,6 +69,9 @@
    * @param openKey
    * @param omMultipartKeyInfo
    * @param oldPartKeyInfo
+   * @param openPartKeyInfoToBeDeleted
+   * @param isRatisEnabled
+   * @param omBucketInfo
    */
   @SuppressWarnings("checkstyle:ParameterNumber")
   public S3MultipartUploadCommitPartResponse(@Nonnull OMResponse omResponse,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java
new file mode 100644
index 0000000..6f60498
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponseWithFSO.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
+
+/**
+ * Response for S3MultipartUploadCommitPartWithFSO request.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, DELETED_TABLE,
+    MULTIPARTINFO_TABLE})
+public class S3MultipartUploadCommitPartResponseWithFSO
+        extends S3MultipartUploadCommitPartResponse {
+
+  /**
+   * Regular response.
+   * 1. Update MultipartKey in MultipartInfoTable with new PartKeyInfo
+   * 2. Delete openKey from OpenKeyTable
+   * 3. If old PartKeyInfo exists, put it in DeletedKeyTable
+   * @param omResponse
+   * @param multipartKey
+   * @param openKey
+   * @param omMultipartKeyInfo
+   * @param oldPartKeyInfo
+   * @param openPartKeyInfoToBeDeleted
+   * @param isRatisEnabled
+   * @param omBucketInfo
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCommitPartResponseWithFSO(
+      @Nonnull OMResponse omResponse, String multipartKey, String openKey,
+      @Nullable OmMultipartKeyInfo omMultipartKeyInfo,
+      @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+      @Nullable OmKeyInfo openPartKeyInfoToBeDeleted, boolean isRatisEnabled,
+      @Nonnull OmBucketInfo omBucketInfo) {
+
+    super(omResponse, multipartKey, openKey, omMultipartKeyInfo,
+            oldPartKeyInfo, openPartKeyInfoToBeDeleted, isRatisEnabled,
+            omBucketInfo);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
index 20e398e..f89fea9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java
@@ -39,22 +39,30 @@
 
 /**
  * Response for Multipart Upload Complete request.
+ *
+ * This performs:
+ * 1) Delete multipart key from OpenKeyTable, MPUTable,
+ * 2) Add key to KeyTable,
+ * 3) Delete unused parts.
  */
 @CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE, DELETED_TABLE,
     MULTIPARTINFO_TABLE})
 public class S3MultipartUploadCompleteResponse extends OMClientResponse {
   private String multipartKey;
+  private String multipartOpenKey;
   private OmKeyInfo omKeyInfo;
   private List<OmKeyInfo> partsUnusedList;
 
   public S3MultipartUploadCompleteResponse(
       @Nonnull OMResponse omResponse,
       @Nonnull String multipartKey,
+      @Nonnull String multipartOpenKey,
       @Nonnull OmKeyInfo omKeyInfo,
       @Nonnull List<OmKeyInfo> unUsedParts) {
     super(omResponse);
     this.partsUnusedList = unUsedParts;
     this.multipartKey = multipartKey;
+    this.multipartOpenKey = multipartOpenKey;
     this.omKeyInfo = omKeyInfo;
   }
 
@@ -71,16 +79,16 @@
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
+    // 1. Delete multipart key from OpenKeyTable, MPUTable
     omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
-        multipartKey);
+        multipartOpenKey);
     omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation,
         multipartKey);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
-        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
-    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
-        omKeyInfo);
+    // 2. Add key to KeyTable
+    String ozoneKey = addToKeyTable(omMetadataManager, batchOperation);
 
+    // 3. Delete unused parts
     if (!partsUnusedList.isEmpty()) {
       // Add unused parts to deleted key table.
       RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable()
@@ -96,4 +104,25 @@
     }
   }
 
-}
\ No newline at end of file
+  protected String addToKeyTable(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
+    omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey,
+        omKeyInfo);
+    return ozoneKey;
+  }
+
+  protected String getMultipartKey() {
+    return multipartKey;
+  }
+
+  protected OmKeyInfo getOmKeyInfo() {
+    return omKeyInfo;
+  }
+
+  protected List<OmKeyInfo> getPartsUnusedList() {
+    return partsUnusedList;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java
new file mode 100644
index 0000000..6776165
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.MULTIPARTINFO_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for Multipart Upload Complete request.
+ *
+ * This performs:
+ * 1) Delete multipart key from OpenFileTable, MPUTable,
+ * 2) Add file to FileTable,
+ * 3) Delete unused parts.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE, DELETED_TABLE,
+    MULTIPARTINFO_TABLE})
+public class S3MultipartUploadCompleteResponseWithFSO
+        extends S3MultipartUploadCompleteResponse {
+
+  public S3MultipartUploadCompleteResponseWithFSO(
+      @Nonnull OMResponse omResponse,
+      @Nonnull String multipartKey,
+      @Nonnull String multipartOpenKey,
+      @Nonnull OmKeyInfo omKeyInfo,
+      @Nonnull List<OmKeyInfo> unUsedParts) {
+    super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, unUsedParts);
+  }
+
+  /**
+   * For when the request is not successful.
+   * For a successful request, the other constructor should be used.
+   */
+  public S3MultipartUploadCompleteResponseWithFSO(
+      @Nonnull OMResponse omResponse) {
+    super(omResponse);
+    checkStatusNotOK();
+  }
+
+  @Override
+  protected String addToKeyTable(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
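+    // The returned ozoneKey is the classic "/volume/bucket/key" name the
+    // base class uses for the unused-parts bookkeeping, while the file
+    // itself is stored under its parent-scoped fileTable key by
+    // OMFileRequest.addToFileTable.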
+    String ozoneKey = omMetadataManager
+        .getOzoneKey(getOmKeyInfo().getVolumeName(),
+            getOmKeyInfo().getBucketName(), getOmKeyInfo().getKeyName());
+
+    OMFileRequest
+        .addToFileTable(omMetadataManager, batchOperation, getOmKeyInfo());
+
+    return ozoneKey;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 1e65ff8..facb82b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -26,6 +26,7 @@
 import java.util.UUID;
 
 import com.google.common.base.Optional;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -34,12 +35,15 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -68,6 +72,7 @@
 
 import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -279,6 +284,25 @@
   }
 
   /**
+   * Adds a directory entry to the DirectoryTable.
+   *
+   * @throws Exception DB failure
+   */
+  public static void addDirKeyToDirTable(boolean addToCache,
+                                         OmDirectoryInfo omDirInfo,
+                                         long trxnLogIndex,
+                                         OMMetadataManager omMetadataManager)
+          throws Exception {
+    String ozoneKey = omDirInfo.getPath();
+    if (addToCache) {
+      omMetadataManager.getDirectoryTable().addCacheEntry(
+              new CacheKey<>(ozoneKey),
+              new CacheValue<>(Optional.of(omDirInfo), trxnLogIndex));
+    }
+    omMetadataManager.getDirectoryTable().put(ozoneKey, omDirInfo);
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
@@ -289,6 +313,22 @@
   }
 
   /**
+   * Create OmDirectoryInfo.
+   */
+  public static OmDirectoryInfo createOmDirectoryInfo(String keyName,
+                                                      long objectID,
+                                                      long parentObjID) {
+    return new OmDirectoryInfo.Builder()
+            .setName(keyName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setObjectID(objectID)
+            .setParentObjectID(parentObjID)
+            .setUpdateID(objectID)
+            .build();
+  }
+
+  /**
    * Create OmKeyInfo.
    */
   public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
@@ -440,6 +480,25 @@
         .setClientId(UUID.randomUUID().toString()).build();
   }
 
+  public static OzoneManagerProtocolProtos.OMRequest createBucketReqFSO(
+          String bucketName, String volumeName, boolean isVersionEnabled,
+          OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) {
+    OzoneManagerProtocolProtos.BucketInfo bucketInfo =
+            OzoneManagerProtocolProtos.BucketInfo.newBuilder()
+                    .setBucketName(bucketName)
+                    .setVolumeName(volumeName)
+                    .setIsVersionEnabled(isVersionEnabled)
+                    .setStorageType(storageTypeProto)
+                    .addAllMetadata(getMetadataListFSO()).build();
+    OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
+            OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
+    req.setBucketInfo(bucketInfo);
+    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
+            .setCreateBucketRequest(req)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
   public static List< HddsProtos.KeyValue> getMetadataList() {
     List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
     metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
@@ -449,6 +508,20 @@
     return metadataList;
   }
 
+  public static List<HddsProtos.KeyValue> getMetadataListFSO() {
+    List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(
+            "value1").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue(
+            "value2").build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey(
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT).setValue(
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX).build());
+    metadataList.add(HddsProtos.KeyValue.newBuilder().setKey(
+            OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS).setValue(
+            "false").build());
+    return metadataList;
+  }
 
   /**
    * Add user to user table.
@@ -825,4 +898,123 @@
         new CacheKey<>(dbVolumeKey),
         new CacheValue<>(Optional.of(omVolumeArgs), 1L));
   }
+
+  /**
+   * Create OmKeyInfo.
+   */
+  @SuppressWarnings("parameterNumber")
+  public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+      String keyName, HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor replicationFactor, long objectID,
+      long parentID, long trxnLogIndex, long creationTime) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(keyName)
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setCreationTime(creationTime)
+            .setModificationTime(Time.now())
+            .setDataSize(1000L)
+            .setReplicationConfig(ReplicationConfig
+                    .fromTypeAndFactor(replicationType, replicationFactor))
+            .setObjectID(objectID)
+            .setUpdateID(trxnLogIndex)
+            .setParentObjectID(parentID)
+            .setFileName(fileName)
+            .build();
+  }
+
+  /**
+   * Adds a key entry to the KeyTable. If the openKeyTable flag is true,
+   * the entry is added to the openKeyTable instead of the keyTable.
+   *
+   * @throws Exception DB failure
+   */
+  public static void addFileToKeyTable(boolean openKeyTable,
+                                       boolean addToCache, String fileName,
+                                       OmKeyInfo omKeyInfo,
+                                       long clientID, long trxnLogIndex,
+                                       OMMetadataManager omMetadataManager)
+          throws Exception {
+    if (openKeyTable) {
+      String ozoneKey = omMetadataManager.getOpenFileName(
+              omKeyInfo.getParentObjectID(), fileName, clientID);
+      if (addToCache) {
+        omMetadataManager.getOpenKeyTable().addCacheEntry(
+                new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getOpenKeyTable().put(ozoneKey, omKeyInfo);
+    } else {
+      String ozoneKey = omMetadataManager.getOzonePathKey(
+              omKeyInfo.getParentObjectID(), fileName);
+      if (addToCache) {
+        omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+                new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+      }
+      omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    }
+  }
+
+  /**
+   * Gets bucketId from OM metadata manager.
+   *
+   * @param volumeName        volume name
+   * @param bucketName        bucket name
+   * @param omMetadataManager metadata manager
+   * @return bucket Id
+   * @throws Exception DB failure
+   */
+  public static long getBucketId(String volumeName, String bucketName,
+                                 OMMetadataManager omMetadataManager)
+          throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    return omBucketInfo.getObjectID();
+  }
+
+  /**
+   * Adds the path components to the directory table and returns the last
+   * directory's object id.
+   *
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param key        key name
+   * @param omMetaMgr  metdata manager
+   * @return last directory object id
+   * @throws Exception
+   */
+  public static long addParentsToDirTable(String volumeName, String bucketName,
+                                    String key, OMMetadataManager omMetaMgr)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetaMgr);
+    if (org.apache.commons.lang3.StringUtils.isBlank(key)) {
+      return bucketId;
+    }
+    String[] pathComponents = StringUtils.split(key, '/');
+    long objectId = bucketId + 10;
+    long parentId = bucketId;
+    long txnID = 50;
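+    // Walk the path components, creating each directory under the previous
+    // one; for the first component the parent is the bucket id.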
+    for (String pathElement : pathComponents) {
+      OmDirectoryInfo omDirInfo =
+              TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId,
+                      parentId);
+      TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+              txnID, omMetaMgr);
+      parentId = omDirInfo.getObjectID();
+    }
+    return parentId;
+  }
+
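+  /**
+   * Sets filesystem-path support and the OM metadata layout version on the
+   * given config.
+   */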
+  public static void configureFSOptimizedPaths(Configuration conf,
+      boolean enableFileSystemPaths, String version) {
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+            enableFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_METADATA_LAYOUT, version);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
index 7ae82f8..873deda 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java
@@ -74,6 +74,7 @@
     auditLogger = Mockito.mock(AuditLogger.class);
     when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
     Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.getOMMetadataLayout()).thenReturn(null);
   }
 
   @After
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
index 66faa40..4ccf420 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java
@@ -186,7 +186,7 @@
 
   }
 
-  private void verifyRequest(OMRequest modifiedOmRequest,
+  protected void verifyRequest(OMRequest modifiedOmRequest,
       OMRequest originalRequest) {
     OzoneManagerProtocolProtos.BucketInfo original =
         originalRequest.getCreateBucketRequest().getBucketInfo();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
new file mode 100644
index 0000000..f63ce17
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.bucket;
+
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests the CreateBucket request with the FSO (prefix) metadata layout.
+ */
+public class TestOMBucketCreateRequestWithFSO
+    extends TestOMBucketCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithFSO() throws Exception {
+    when(ozoneManager.getOMMetadataLayout()).thenReturn(
+            OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName,
+        bucketName);
+
+    doValidateAndUpdateCache(volumeName, bucketName,
+        omBucketCreateRequest.getOmRequest());
+  }
+
+  private OMBucketCreateRequest doPreExecute(String volumeName,
+      String bucketName) throws Exception {
+    addCreateVolumeToTable(volumeName, omMetadataManager);
+    OMRequest originalRequest =
+        TestOMRequestUtils.createBucketReqFSO(bucketName, volumeName,
+                false, StorageTypeProto.SSD);
+
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(originalRequest);
+
+    OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager);
+    verifyRequest(modifiedRequest, originalRequest);
+    return new OMBucketCreateRequest(modifiedRequest);
+  }
+
+  private void doValidateAndUpdateCache(String volumeName, String bucketName,
+      OMRequest modifiedRequest) throws Exception {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+
+    // As we have not yet called validateAndUpdateCache, get() should
+    // return null.
+
+    Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey));
+    OMBucketCreateRequest omBucketCreateRequest =
+        new OMBucketCreateRequest(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+        omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1,
+            ozoneManagerDoubleBufferHelper);
+
+    // After validateAndUpdateCache, the entry should have been added to the
+    // cache, so get() should return a non-null value.
+    OmBucketInfo dbBucketInfo =
+        omMetadataManager.getBucketTable().get(bucketKey);
+    Assert.assertNotNull(dbBucketInfo);
+
+    // verify table data with actual request data.
+    OmBucketInfo bucketInfoFromProto = OmBucketInfo.getFromProtobuf(
+        modifiedRequest.getCreateBucketRequest().getBucketInfo());
+
+    Assert.assertEquals(bucketInfoFromProto.getCreationTime(),
+        dbBucketInfo.getCreationTime());
+    Assert.assertEquals(bucketInfoFromProto.getModificationTime(),
+        dbBucketInfo.getModificationTime());
+    Assert.assertEquals(bucketInfoFromProto.getAcls(),
+        dbBucketInfo.getAcls());
+    Assert.assertEquals(bucketInfoFromProto.getIsVersionEnabled(),
+        dbBucketInfo.getIsVersionEnabled());
+    Assert.assertEquals(bucketInfoFromProto.getStorageType(),
+        dbBucketInfo.getStorageType());
+    Assert.assertEquals(bucketInfoFromProto.getMetadata(),
+        dbBucketInfo.getMetadata());
+    Assert.assertEquals(bucketInfoFromProto.getEncryptionKeyInfo(),
+        dbBucketInfo.getEncryptionKeyInfo());
+
+    // verify OMResponse.
+    verifySuccessCreateBucketResponse(omClientResponse.getOMResponse());
+
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
new file mode 100644
index 0000000..f147e91
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java
@@ -0,0 +1,657 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ResolvedBucket;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.jetbrains.annotations.NotNull;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test OM directory create request - prefix layout.
+ */
+public class TestOMDirectoryCreateRequestWithFSO {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OzoneManager ozoneManager;
+  private OMMetrics omMetrics;
+  private OMMetadataManager omMetadataManager;
+  private AuditLogger auditLogger;
+  // Just setting ozoneManagerDoubleBuffer which does nothing.
+  private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
+          ((response, transactionIndex) -> {
+            return null;
+          });
+
+  @Before
+  public void setup() throws Exception {
+    ozoneManager = Mockito.mock(OzoneManager.class);
+    omMetrics = OMMetrics.create();
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+            folder.newFolder().getAbsolutePath());
+    TestOMRequestUtils.configureFSOptimizedPaths(ozoneConfiguration,
+            true, OMConfigKeys.OZONE_OM_METADATA_LAYOUT_PREFIX);
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+    auditLogger = Mockito.mock(AuditLogger.class);
+    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
+    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+    when(ozoneManager.resolveBucketLink(any(KeyArgs.class),
+            any(OMClientRequest.class)))
+            .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", "")));
+  }
+
+  @After
+  public void stop() {
+    omMetrics.unRegister();
+    Mockito.framework().clearInlineMocks();
+  }
+
+  @Test
+  public void testPreExecute() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    String keyName = "a/b/c";
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirectoryCreateRequestWithFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmRequest =
+            omDirectoryCreateRequestWithFSO.preExecute(ozoneManager);
+
+    // preExecute modifies the original request, so the two must differ.
+    Assert.assertNotEquals(omRequest, modifiedOmRequest);
+  }
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateRequestFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq =
+        omDirCreateRequestFSO.preExecute(ozoneManager);
+
+    omDirCreateRequestFSO =
+        new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateRequestFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmRequest =
+        omDirCreateRequestFSO.preExecute(ozoneManager);
+
+    omDirCreateRequestFSO =
+        new OMDirectoryCreateRequestWithFSO(modifiedOmRequest);
+
+    OMClientResponse omClientResponse =
+            omDirCreateRequestFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(VOLUME_NOT_FOUND,
+            omClientResponse.getOMResponse().getStatus());
+
+    // No directory entries should exist in DB.
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+    TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
+            omClientResponse.getOMResponse().getStatus());
+
+    // No directory entries should exist in DB.
+    Assert.assertTrue("Unexpected directory entries!",
+            omMetadataManager.getDirectoryTable().isEmpty());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirectoryInPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+    int objID = 100;
+
+    //1. Create root
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
+                    bucketID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+    //2. Create sub-directory under root
+    omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
+            omDirInfo.getObjectID());
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    // Key should exist in DB and cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    // bucketID is the parent
+    long parentID = bucketID;
+
+    // add all the directories into DirectoryTable
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(
+            OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // Directories should exist in DB but not in the cache.
+    verifyDirectoriesInDB(dirs, bucketID);
+    verifyDirectoriesNotInCache(dirs, bucketID);
+  }
+
+  /**
+   * Case: File exists with the same name as the requested directory.
+   * Say, createDir '/a/b/c' is requested and a file with the same name
+   * already exists.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    // Add all the parent directories to the DirectoryTable. The leaf node is
+    // not created here; it is left for the CreateDirectory request.
+    for (int indx = 0; indx < dirs.size() - 1; indx++) {
+      long objID = 100 + indx;
+      long txnID = 5000 + indx;
+      // for index=0, parentID is bucketID
+      OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+              dirs.get(indx), objID, parentID);
+      TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+              txnID, omMetadataManager);
+
+      parentID = omDirInfo.getObjectID();
+    }
+
+    long objID = parentID + 100;
+    long txnID = 50000;
+
+    // Add a file to the key table to simulate "file already exists".
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID++);
+    String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+    ++txnID;
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
+            new CacheValue<>(Optional.of(omKeyInfo), txnID));
+    omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq =
+            omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // The file should still exist in DB.
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneFileName));
+    // No new directory should have been created.
+    Assert.assertEquals("Wrong directories count!", 3,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+  /**
+   * Case: File exists in the given path.
+   * Say, createDir '/a/b/c/d' is requested and a file '/a/b' already exists
+   * along the given path.
+   */
+  @Test
+  public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
+          throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long parentID = omBucketInfo.getObjectID();
+
+    long objID = parentID + 100;
+    long txnID = 5000;
+
+    // for index=0, parentID is bucketID
+    OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+            dirs.get(0), objID++, parentID);
+    TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+            txnID, omMetadataManager);
+    parentID = omDirInfo.getObjectID();
+
+    // Add a key in second level.
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE, objID);
+
+    String ozoneKey = parentID + "/" + dirs.get(1);
+    ++txnID;
+    omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+            new CacheValue<>(Optional.of(omKeyInfo), txnID));
+    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            keyName);
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq =
+            omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertTrue("Invalid response code:" +
+                    omClientResponse.getOMResponse().getStatus(),
+            omClientResponse.getOMResponse().getStatus()
+                    == OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+            0, ozoneManager.getMetrics().getNumKeys());
+
+    // The file should still exist in DB.
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneKey));
+    // No new directory should have been created.
+    Assert.assertEquals("Wrong directories count!",
+            1, omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+  }
+
+  @Test
+  public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 255);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 256);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager,
+                    100L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
+            omClientResponse.getOMResponse().getStatus());
+
+    Assert.assertEquals("Unexpected directories!", 0,
+            omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+
+    Assert.assertEquals(0, omMetrics.getNumKeys());
+  }
+
+  @Test
+  public void testCreateDirectoryOMMetric() throws Exception {
+    String volumeName = "vol1";
+    String bucketName = "bucket1";
+    List<String> dirs = new ArrayList<String>();
+    String keyName = createDirKey(dirs, 3);
+
+    // Add volume and bucket entries to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+            OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+    OMDirectoryCreateRequestWithFSO omDirCreateReqFSO =
+            new OMDirectoryCreateRequestWithFSO(omRequest);
+
+    OMRequest modifiedOmReq = omDirCreateReqFSO.preExecute(ozoneManager);
+
+    omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq);
+
+    Assert.assertEquals(0L, omMetrics.getNumKeys());
+    OMClientResponse omClientResponse =
+            omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    verifyDirectoriesInDB(dirs, bucketID);
+
+    Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+  }
+
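+  /**
+   * Builds a random key of the given depth (e.g. "abcde/fghij/...") and
+   * collects each path element into the dirs list.
+   */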
+  @NotNull
+  private String createDirKey(List<String> dirs, int depth) {
+    String keyName = RandomStringUtils.randomAlphabetic(5);
+    dirs.add(keyName);
+    StringBuilder buf = new StringBuilder(keyName);
+    for (int i = 0; i < depth; i++) {
+      String dirName = RandomStringUtils.randomAlphabetic(5);
+      dirs.add(dirName);
+      buf.append(OzoneConsts.OM_KEY_PREFIX);
+      buf.append(dirName);
+    }
+    return buf.toString();
+  }
+
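+  /**
+   * Asserts that every directory in dirs exists in the directory table,
+   * chained parent-to-child starting from the bucket id.
+   */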
+  private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      String dbKey = "";
+      // for index=0, parentID is bucketID
+      dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Invalid directory!", omDirInfo);
+      Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      parentID = omDirInfo.getObjectID();
+    }
+  }
+
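+  /**
+   * Asserts that none of the directories in dirs has an entry in the
+   * directory table cache.
+   */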
+  private void verifyDirectoriesNotInCache(List<String> dirs, long bucketID)
+          throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      String dbKey = "";
+      // for index=0, parentID is bucketID
+      dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      CacheValue<OmDirectoryInfo> omDirInfoCacheValue =
+              omMetadataManager.getDirectoryTable()
+                      .getCacheValue(new CacheKey<>(dbKey));
+      Assert.assertNull("Unexpected directory!", omDirInfoCacheValue);
+    }
+  }
+
+  /**
+   * Create OMRequest which encapsulates CreateDirectory request.
+   *
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @return OMRequest
+   */
+  private OMRequest createDirectoryRequest(String volumeName,
+      String bucketName, String keyName) {
+    return OMRequest.newBuilder().setCreateDirectoryRequest(
+            CreateDirectoryRequest.newBuilder().setKeyArgs(
+                    KeyArgs.newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName).setKeyName(keyName)))
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .setClientId(UUID.randomUUID().toString()).build();
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index a500f4c..0a76589 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -21,6 +21,7 @@
 import java.util.List;
 import java.util.UUID;
 
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -55,8 +56,7 @@
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest =
-        new OMFileCreateRequest(omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -96,8 +96,7 @@
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
         false, false);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
     Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -121,21 +120,17 @@
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
     long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
-
     // Before calling
-    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, id, false);
     Assert.assertNull(omKeyInfo);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -146,8 +141,7 @@
 
     // Check open table whether key is added or not.
 
-    omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-    Assert.assertNotNull(omKeyInfo);
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, id, true);
 
     List< OmKeyLocationInfo > omKeyLocationInfoList =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -179,12 +173,11 @@
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
             false, true);
 
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -200,13 +193,11 @@
         false, true);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -311,8 +302,7 @@
     testNonRecursivePath(key, false, false, true);
   }
 
-
-  private void testNonRecursivePath(String key,
+  protected void testNonRecursivePath(String key,
       boolean overWrite, boolean recursive, boolean fail) throws Exception {
     OMRequest omRequest = createFileRequest(volumeName, bucketName, key,
         HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
@@ -320,12 +310,11 @@
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
-        omRequest);
+    OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
 
     OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
 
-    omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+    omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
 
     OMClientResponse omFileCreateResponse =
         omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -341,10 +330,9 @@
       Assert.assertTrue(omFileCreateResponse.getOMResponse().getSuccess());
       long id = modifiedOmRequest.getCreateFileRequest().getClientID();
 
-      String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-          key, id);
-      OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
-      Assert.assertNotNull(omKeyInfo);
+      verifyKeyNameInCreateFileResponse(key, omFileCreateResponse);
+
+      OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(key, id, true);
 
       List< OmKeyLocationInfo > omKeyLocationInfoList =
           omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -368,6 +356,14 @@
     }
   }
 
+  private void verifyKeyNameInCreateFileResponse(String key,
+      OMClientResponse omFileCreateResponse) {
+    OzoneManagerProtocolProtos.CreateFileResponse createFileResponse =
+            omFileCreateResponse.getOMResponse().getCreateFileResponse();
+    String actualFileName = createFileResponse.getKeyInfo().getKeyName();
+    Assert.assertEquals("Incorrect keyName", key, actualFileName);
+  }
+
   /**
    * Create OMRequest which encapsulates OMFileCreateRequest.
    * @param volumeName
@@ -377,7 +373,8 @@
    * @param replicationType
    * @return OMRequest
    */
-  private OMRequest createFileRequest(
+  @NotNull
+  protected OMRequest createFileRequest(
       String volumeName, String bucketName, String keyName,
       HddsProtos.ReplicationFactor replicationFactor,
       HddsProtos.ReplicationType replicationType, boolean overWrite,
@@ -399,4 +396,16 @@
         .setCreateFileRequest(createFileRequest).build();
 
   }
+
+  /**
+   * Gets OMFileCreateRequest reference.
+   *
+   * @param omRequest om request
+   * @return OMFileCreateRequest reference
+   */
+  @NotNull
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+    return new OMFileCreateRequest(omRequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
new file mode 100644
index 0000000..858a075
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+/**
+ * Tests OMFileCreateRequest - prefix layout.
+ */
+public class TestOMFileCreateRequestWithFSO extends TestOMFileCreateRequest {
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
+    testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
+    testNonRecursivePath("a/b", false, false, true);
+    Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys());
+
+    // Create parent dirs for the path
+    TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            "a/b/c", omMetadataManager);
+    String fileNameD = "d";
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+            "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+
+    // cannot create file if directory of same name exists
+    testNonRecursivePath("a/b/c", false, false, true);
+
+    // Delete the child key but retain the path "a/b/" in the key table.
+    OmDirectoryInfo dirPathC = getDirInfo("a/b/c");
+    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+    String dbFileD = omMetadataManager.getOzonePathKey(
+            dirPathC.getObjectID(), fileNameD);
+    omMetadataManager.getKeyTable().delete(dbFileD);
+    omMetadataManager.getKeyTable().delete(dirPathC.getPath());
+
+    // can create non-recursive because parents already exist.
+    testNonRecursivePath("a/b/e", false, false, false);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
+          throws Exception {
+    String key = "c/d/e/f";
+    // Should be able to create the file even if parent directories do not
+    // exist, because recursive is set to true.
+    testNonRecursivePath(key, false, true, false);
+    Assert.assertEquals("Invalid metrics value", 3, omMetrics.getNumKeys());
+
+    // Add the key to key table
+    OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    omDirInfo.getObjectID() + 10,
+                    omDirInfo.getObjectID(), 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            "f", omKeyInfo, -1,
+            omDirInfo.getObjectID() + 10, omMetadataManager);
+
+    // Even if key exists, should be able to create file as overwrite is set
+    // to true
+    testNonRecursivePath(key, true, true, false);
+    testNonRecursivePath(key, false, true, true);
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
+          throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "f";
+    String key = parentDir + "/" + fileName;
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    // In non-recursive mode the parents must already exist, so creating the
+    // file under the existing path "c/d/e" should succeed.
+    testNonRecursivePath(key, false, false, false);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+
+    // Even if key exists in KeyTable, should be able to create file as
+    // overwrite is set to true
+    testNonRecursivePath(key, true, false, false);
+    testNonRecursivePath(key, false, false, true);
+  }
+
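+  /**
+   * Walks the directory table from the bucket down to the parent of the
+   * leaf, then looks the file up in the open key table.
+   */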
+  @Override
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                             boolean doAssert)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return null;
+  }
+
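+  /**
+   * Resolves a directory path to its OmDirectoryInfo by walking the
+   * directory table one path component at a time.
+   */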
+  private OmDirectoryInfo getDirInfo(String key)
+          throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    OmDirectoryInfo dirInfo = null;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      // directory
+      String dbKey = omMetadataManager.getOzonePathKey(parentId,
+              pathElement);
+      dirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      parentId = dirInfo.getObjectID();
+    }
+    return dirInfo;
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set in OzoneManager#start(),
+    // which is not invoked in this test. Hence the flag is set explicitly
+    // here so that the prefix tables are populated.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
+  @Override
+  protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+    return new OMFileCreateRequestWithFSO(omRequest);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
index 4b3d38f..9d26d0d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java
@@ -24,6 +24,7 @@
 import java.util.UUID;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -57,21 +58,19 @@
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName);
 
     OMRequest modifiedOmRequest =
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
     // Check before calling validateAndUpdateCache. As adding DB entry has
     // not added any blocks, so size should be zero.
 
-    OmKeyInfo omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
+    OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, clientID,
+            true);
 
     List<OmKeyLocationInfo> omKeyLocationInfo =
         omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -87,10 +86,8 @@
 
     // Check open table whether new block is added or not.
 
-    omKeyInfo =
-        omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey(
-            volumeName, bucketName, keyName, clientID));
-
+    omKeyInfo = verifyPathInOpenKeyTable(keyName, clientID,
+            true);
 
     // Check modification time
     Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest()
@@ -119,6 +116,12 @@
 
   }
 
+  @NotNull
+  protected OMAllocateBlockRequest getOmAllocateBlockRequest(
+          OMRequest modifiedOmRequest) {
+    return new OMAllocateBlockRequest(modifiedOmRequest);
+  }
+
   @Test
   public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
 
@@ -126,7 +129,7 @@
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
 
     OMClientResponse omAllocateBlockResponse =
@@ -145,7 +148,7 @@
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
 
     // Added only volume to DB.
@@ -168,7 +171,7 @@
         doPreExecute(createAllocateBlockRequest());
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(modifiedOmRequest);
+            getOmAllocateBlockRequest(modifiedOmRequest);
 
     // Add volume, bucket entries to DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
@@ -190,10 +193,11 @@
    * @return OMRequest - modified request returned from preExecute.
    * @throws Exception
    */
-  private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
+  protected OMRequest doPreExecute(OMRequest originalOMRequest)
+      throws Exception {
 
     OMAllocateBlockRequest omAllocateBlockRequest =
-        new OMAllocateBlockRequest(originalOMRequest);
+            getOmAllocateBlockRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest =
         omAllocateBlockRequest.preExecute(ozoneManager);
@@ -228,7 +232,7 @@
   }
 
 
-  private OMRequest createAllocateBlockRequest() {
+  protected OMRequest createAllocateBlockRequest() {
 
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
@@ -246,4 +250,12 @@
         .setAllocateBlockRequest(allocateBlockRequest).build();
 
   }
+
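+  /**
+   * Adds the test key to the open key table; subclasses may override this
+   * to use a different layout and return the DB path key.
+   */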
+  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
+          throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, replicationType, replicationFactor,
+            omMetadataManager);
+    return "";
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
new file mode 100644
index 0000000..6b34088
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMAllocateBlockRequest class prefix layout.
+ */
+public class TestOMAllocateBlockRequestWithFSO
+    extends TestOMAllocateBlockRequest {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set while invoking
+    // OzoneManager#start(), which is not called in this test. Hence the
+    // configuration is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
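+  /**
+   * In the prefix (FSO) layout the parent directories are materialized in
+   * the directory table and the open file entry is keyed by
+   * parentID/fileName instead of the full key path.
+   */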
+  @Override
+  protected String addKeyToOpenKeyTable(String volumeName, String bucketName)
+          throws Exception {
+    // need to initialize parentID
+    String parentDir = keyName;
+    String fileName = "file1";
+    keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+
+    // add parentDir to dirTable
+    long parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoFSO =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  @Override
+  protected OMAllocateBlockRequest getOmAllocateBlockRequest(
+          OzoneManagerProtocolProtos.OMRequest modifiedOmRequest) {
+    return new OMAllocateBlockRequestWithFSO(modifiedOmRequest);
+  }
+
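+  // Walks each path component through the directory table to resolve the
+  // parent object ID, then looks up the final component in the open file
+  // table.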
+  @Override
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+      boolean doAssert) throws Exception {
+    long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+            omMetadataManager);
+    String[] pathComponents = StringUtils.split(key, '/');
+    long parentId = bucketId;
+    for (int indx = 0; indx < pathComponents.length; indx++) {
+      String pathElement = pathComponents[indx];
+      // Reached last component, which is file name
+      if (indx == pathComponents.length - 1) {
+        String dbOpenFileName = omMetadataManager.getOpenFileName(
+                parentId, pathElement, id);
+        OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+                .get(dbOpenFileName);
+        if (doAssert) {
+          Assert.assertNotNull("Invalid key!", omKeyInfo);
+        }
+        return omKeyInfo;
+      } else {
+        // directory
+        String dbKey = omMetadataManager.getOzonePathKey(parentId,
+                pathElement);
+        OmDirectoryInfo dirInfo =
+                omMetadataManager.getDirectoryTable().get(dbKey);
+        parentId = dirInfo.getObjectID();
+      }
+    }
+    if (doAssert) {
+      Assert.fail("Invalid key!");
+    }
+    return null;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
index 470cf60..0e28e4b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java
@@ -20,7 +20,9 @@
 import java.util.List;
 import java.util.UUID;
 import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
 import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
@@ -46,16 +48,18 @@
     // Manually add volume, bucket and key to DB
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // The key must exist, as it was added manually to the key table.
+    Assert.assertNotNull(omKeyInfo);
 
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
 
     // Create KeyAddAcl request
     OMRequest originalRequest = createAddAclkeyRequest(acl);
-    OMKeyAddAclRequest omKeyAddAclRequest = new OMKeyAddAclRequest(
-        originalRequest);
+    OMKeyAclRequest omKeyAddAclRequest = getOmKeyAddAclRequest(originalRequest);
     OMRequest preExecuteRequest = omKeyAddAclRequest.preExecute(ozoneManager);
 
     // When preExecute() of adding acl,
@@ -68,7 +72,7 @@
 
     // Execute original request
     OMClientResponse omClientResponse = omKeyAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
@@ -79,19 +83,22 @@
   public void testKeyRemoveAclRequest() throws Exception {
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // The key must exist, as it was added manually to the key table.
+    Assert.assertNotNull(omKeyInfo);
 
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
 
     // Add acl.
     OMRequest addAclRequest = createAddAclkeyRequest(acl);
-    OMKeyAddAclRequest omKeyAddAclRequest =
-        new OMKeyAddAclRequest(addAclRequest);
+    OMKeyAclRequest omKeyAddAclRequest =
+        getOmKeyAddAclRequest(addAclRequest);
     omKeyAddAclRequest.preExecute(ozoneManager);
     OMClientResponse omClientAddAclResponse = omKeyAddAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     OMResponse omAddAclResponse = omClientAddAclResponse.getOMResponse();
     Assert.assertNotNull(omAddAclResponse.getAddAclResponse());
@@ -99,8 +106,6 @@
         omAddAclResponse.getStatus());
 
     // Verify result of adding acl.
-    String ozoneKey = omMetadataManager
-        .getOzoneKey(volumeName, bucketName, keyName);
     List<OzoneAcl> keyAcls = omMetadataManager.getKeyTable().get(ozoneKey)
         .getAcls();
     Assert.assertEquals(1, keyAcls.size());
@@ -108,8 +113,8 @@
 
     // Remove acl.
     OMRequest removeAclRequest = createRemoveAclKeyRequest(acl);
-    OMKeyRemoveAclRequest omKeyRemoveAclRequest =
-        new OMKeyRemoveAclRequest(removeAclRequest);
+    OMKeyAclRequest omKeyRemoveAclRequest =
+        getOmKeyRemoveAclRequest(removeAclRequest);
     OMRequest preExecuteRequest = omKeyRemoveAclRequest
         .preExecute(ozoneManager);
 
@@ -122,7 +127,7 @@
     Assert.assertTrue(newModTime > originModTime);
 
     OMClientResponse omClientRemoveAclResponse = omKeyRemoveAclRequest
-        .validateAndUpdateCache(ozoneManager, 2,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     OMResponse omRemoveAclResponse = omClientRemoveAclResponse.getOMResponse();
     Assert.assertNotNull(omRemoveAclResponse.getRemoveAclResponse());
@@ -139,15 +144,18 @@
   public void testKeySetAclRequest() throws Exception {
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
-    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
-        keyName, clientID, replicationType, replicationFactor, 1L,
-        omMetadataManager);
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // The key must exist, as it was added manually to the key table.
+    Assert.assertNotNull(omKeyInfo);
 
     OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]");
 
     OMRequest setAclRequest = createSetAclKeyRequest(acl);
-    OMKeySetAclRequest omKeySetAclRequest =
-        new OMKeySetAclRequest(setAclRequest);
+    OMKeyAclRequest omKeySetAclRequest =
+        getOmKeySetAclRequest(setAclRequest);
     OMRequest preExecuteRequest = omKeySetAclRequest.preExecute(ozoneManager);
 
     // When preExecute() of setting acl,
@@ -159,7 +167,7 @@
     Assert.assertTrue(newModTime > originModTime);
 
     OMClientResponse omClientResponse = omKeySetAclRequest
-        .validateAndUpdateCache(ozoneManager, 1,
+        .validateAndUpdateCache(ozoneManager, 100L,
             ozoneManagerDoubleBufferHelper);
     OMResponse omSetAclResponse = omClientResponse.getOMResponse();
     Assert.assertNotNull(omSetAclResponse.getSetAclResponse());
@@ -167,8 +175,6 @@
         omSetAclResponse.getStatus());
 
     // Verify result of setting acl.
-    String ozoneKey = omMetadataManager
-        .getOzoneKey(volumeName, bucketName, keyName);
     List<OzoneAcl> newAcls = omMetadataManager.getKeyTable().get(ozoneKey)
         .getAcls();
     Assert.assertEquals(newAcls.get(0), acl);
@@ -177,7 +183,7 @@
   /**
    * Create OMRequest which encapsulates OMKeyAddAclRequest.
    */
-  private OMRequest createAddAclkeyRequest(OzoneAcl acl) {
+  protected OMRequest createAddAclkeyRequest(OzoneAcl acl) {
     OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
         .setBucketName(bucketName)
         .setVolumeName(volumeName)
@@ -233,4 +239,27 @@
         .setSetAclRequest(setAclRequest)
         .build();
   }
+
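+  /**
+   * Adds a key to the key table using the simple layout and returns its
+   * DB key; overridden by the FSO test to use the file table format.
+   */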
+  protected String addKeyToTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName,
+        keyName, clientID, replicationType, replicationFactor, 1L,
+        omMetadataManager);
+
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+        keyName);
+  }
+
+  protected OMKeyAclRequest getOmKeyAddAclRequest(OMRequest originalRequest) {
+    return new OMKeyAddAclRequest(
+        originalRequest);
+  }
+
+  protected OMKeyAclRequest getOmKeyRemoveAclRequest(
+      OMRequest removeAclRequest) {
+    return new OMKeyRemoveAclRequest(removeAclRequest);
+  }
+
+  protected OMKeyAclRequest getOmKeySetAclRequest(OMRequest setAclRequest) {
+    return new OMKeySetAclRequest(setAclRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java
new file mode 100644
index 0000000..2870bb0
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestWithFSO;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO;
+import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+
+/**
+ * Test Key ACL requests for prefix layout.
+ */
+public class TestOMKeyAclRequestWithFSO extends TestOMKeyAclRequest {
+
+  @Override
+  protected String addKeyToTable() throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "file1";
+    String key = parentDir + "/" + fileName;
+    keyName = key; // updated key name
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils
+        .addParentsToDirTable(volumeName, bucketName, parentDir,
+            omMetadataManager);
+
+    OmKeyInfo omKeyInfo = TestOMRequestUtils
+        .createOmKeyInfo(volumeName, bucketName, key,
+            HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE,
+            parentId + 1, parentId, 100, Time.now());
+    TestOMRequestUtils
+        .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50,
+            omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  @Override protected OMKeyAclRequest getOmKeyAddAclRequest(
+      OzoneManagerProtocolProtos.OMRequest originalRequest) {
+    return new OMKeyAddAclRequestWithFSO(originalRequest);
+  }
+
+  @Override protected OMKeyAclRequest getOmKeyRemoveAclRequest(
+      OzoneManagerProtocolProtos.OMRequest removeAclRequest) {
+    return new OMKeyRemoveAclRequestWithFSO(removeAclRequest);
+  }
+
+  @Override protected OMKeyAclRequest getOmKeySetAclRequest(
+      OzoneManagerProtocolProtos.OMRequest setAclRequest) {
+    return new OMKeySetAclRequestWithFSO(setAclRequest);
+  }
+
+  @Override protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set while invoking
+    // OzoneManager#start(), which is not called in this test. Hence the
+    // configuration is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index f864426..229c23a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -19,12 +19,15 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -48,6 +51,8 @@
  */
 public class TestOMKeyCommitRequest extends TestOMKeyRequest {
 
+  private String parentDir;
+
   @Test
   public void testPreExecute() throws Exception {
     doPreExecute(createCommitKeyRequest());
@@ -60,7 +65,7 @@
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+        getOmKeyCommitRequest(modifiedOmRequest);
 
     // Append 3 blocks locations.
     List<OmKeyLocationInfo> allocatedLocationList = getKeyLocation(3)
@@ -70,12 +75,7 @@
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager,
-        allocatedLocationList);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList);
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -114,11 +114,10 @@
   @Test
   public void testValidateAndUpdateCache() throws Exception {
 
-    OMRequest modifiedOmRequest =
-        doPreExecute(createCommitKeyRequest());
+    OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
 
     KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs();
@@ -132,12 +131,7 @@
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager,
-        allocatedLocationList);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList);
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -160,6 +154,8 @@
     omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     Assert.assertNotNull(omKeyInfo);
+    // DB keyInfo format
+    verifyKeyName(omKeyInfo);
 
     // Check modification time
 
@@ -177,7 +173,14 @@
         omKeyInfo.getLatestVersionLocations().getLocationList());
     Assert.assertEquals(allocatedLocationList,
         omKeyInfo.getLatestVersionLocations().getLocationList());
+  }
 
+  @Test
+  public void testValidateAndUpdateCacheWithSubDirs() throws Exception {
+    parentDir = "dir1/dir2/dir3/";
+    keyName = parentDir + UUID.randomUUID().toString();
+
+    testValidateAndUpdateCache();
   }
 
   @Test
@@ -187,10 +190,9 @@
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
+            getOmKeyCommitRequest(modifiedOmRequest);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -217,13 +219,11 @@
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -250,14 +250,12 @@
         doPreExecute(createCommitKeyRequest());
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(modifiedOmRequest);
-
+            getOmKeyCommitRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = getOzonePathKey();
 
     // Key should not be there in key table, as validateAndUpdateCache is
     // still not called.
@@ -286,7 +284,7 @@
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCommitRequest omKeyCommitRequest =
-        new OMKeyCommitRequest(originalOMRequest);
+            getOmKeyCommitRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager);
 
@@ -364,4 +362,36 @@
     return keyLocations;
   }
 
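+  /** Parent dir is set only by the sub-directory test case; null otherwise. */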
+  protected String getParentDir() {
+    return parentDir;
+  }
+
+  @NotNull
+  protected String getOzonePathKey() throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  @NotNull
+  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
+      throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+        clientID, replicationType, replicationFactor, omMetadataManager,
+        locationList);
+
+    return getOzonePathKey();
+  }
+
+  @NotNull
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequest(omRequest);
+  }
+
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    Assert.assertEquals("Incorrect KeyName", keyName,
+            omKeyInfo.getKeyName());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
new file mode 100644
index 0000000..f257cc9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Class tests OMKeyCommitRequest with prefix layout.
+ */
+public class TestOMKeyCommitRequestWithFSO extends TestOMKeyCommitRequest {
+
+  private long parentID = Long.MIN_VALUE;
+
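+  /** Returns the bucket's objectID, or Long.MIN_VALUE if it is absent. */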
+  private long getBucketID() throws IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if (omBucketInfo != null) {
+      return omBucketInfo.getObjectID();
+    }
+    // bucket doesn't exist in DB
+    return Long.MIN_VALUE;
+  }
+
+  @Override
+  protected String getOzonePathKey() throws IOException {
+    long bucketID = getBucketID();
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(bucketID, fileName);
+  }
+
+  @Override
+  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
+      throws Exception {
+    // need to initialize parentID
+    if (getParentDir() == null) {
+      parentID = getBucketID();
+    } else {
+      parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+              bucketName, getParentDir(), omMetadataManager);
+    }
+    long objectId = 100;
+
+    OmKeyInfo omKeyInfoFSO =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+    omKeyInfoFSO.appendNewBlocks(locationList, false);
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
+
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set while invoking
+    // OzoneManager#start(), which is not called in this test. Hence the
+    // configuration is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+    return new OMKeyCommitRequestWithFSO(omRequest);
+  }
+
+  @Override
+  protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+    // The prefix layout stores the fileName in the keyName DB field.
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    Assert.assertEquals("Incorrect FileName", fileName,
+            omKeyInfo.getFileName());
+    Assert.assertEquals("Incorrect KeyName", fileName,
+            omKeyInfo.getKeyName());
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index 7269957..3df6f38 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -74,7 +74,7 @@
         doPreExecute(createKeyRequest(false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
     // Add volume and bucket entries to DB.
     addVolumeAndBucketToDB(volumeName, bucketName,
@@ -82,8 +82,7 @@
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
+    String openKey = getOpenKey(id);
 
     // Before calling
     OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
@@ -138,7 +137,7 @@
         doPreExecute(createKeyRequest(true, partNumber));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
     // Add volume and bucket entries to DB.
     addVolumeAndBucketToDB(volumeName, bucketName,
@@ -178,7 +177,7 @@
         doPreExecute(createKeyRequest(false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
@@ -217,13 +216,12 @@
             false, 0));
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(modifiedOmRequest);
+            getOMKeyCreateRequest(modifiedOmRequest);
 
 
     long id = modifiedOmRequest.getCreateKeyRequest().getClientID();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, id);
+    String openKey = getOpenKey(id);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
         omMetadataManager);
@@ -248,8 +246,6 @@
 
   }
 
-
-
   /**
    * This method calls preExecute and verify the modified request.
    * @param originalOMRequest
@@ -259,7 +255,7 @@
   private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
 
     OMKeyCreateRequest omKeyCreateRequest =
-        new OMKeyCreateRequest(originalOMRequest);
+            getOMKeyCreateRequest(originalOMRequest);
 
     OMRequest modifiedOmRequest =
         omKeyCreateRequest.preExecute(ozoneManager);
@@ -349,7 +345,7 @@
   @Test
   public void testKeyCreateWithFileSystemPathsEnabled() throws Exception {
 
-    OzoneConfiguration configuration = new OzoneConfiguration();
+    OzoneConfiguration configuration = getOzoneConfiguration();
     configuration.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
     when(ozoneManager.getConfiguration()).thenReturn(configuration);
     when(ozoneManager.getEnableFileSystemPaths()).thenReturn(true);
@@ -367,8 +363,7 @@
     createAndCheck(keyName);
 
     // Commit openKey entry.
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
-        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+    addToKeyTable(keyName);
 
     // Now create another file in same dir path.
     keyName = "/a/b/c/file2";
@@ -430,10 +425,15 @@
 
   }
 
+  protected void addToKeyTable(String keyName) throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+        keyName.substring(1), 0L, RATIS, THREE, omMetadataManager);
+  }
+
 
   private void checkNotAValidPath(String keyName) {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     try {
       omKeyCreateRequest.preExecute(ozoneManager);
@@ -450,11 +450,11 @@
   private void checkNotAFile(String keyName) throws Exception {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
 
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     omRequest = omKeyCreateRequest.preExecute(ozoneManager);
 
-    omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     OMClientResponse omClientResponse =
         omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
@@ -468,11 +468,11 @@
   private void createAndCheck(String keyName) throws Exception {
     OMRequest omRequest = createKeyRequest(false, 0, keyName);
 
-    OMKeyCreateRequest omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     omRequest = omKeyCreateRequest.preExecute(ozoneManager);
 
-    omKeyCreateRequest = new OMKeyCreateRequest(omRequest);
+    omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
 
     OMClientResponse omClientResponse =
         omKeyCreateRequest.validateAndUpdateCache(ozoneManager,
@@ -483,7 +483,7 @@
     checkCreatedPaths(omKeyCreateRequest, omRequest, keyName);
   }
 
-  private void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+  protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
       OMRequest omRequest, String keyName) throws Exception {
     keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
     // Check intermediate directories created or not.
@@ -497,9 +497,7 @@
     Assert.assertNotNull(omKeyInfo);
   }
 
-
-
-  private void checkIntermediatePaths(Path keyPath) throws Exception {
+  protected long checkIntermediatePaths(Path keyPath) throws Exception {
     // Check intermediate paths are created
     keyPath = keyPath.getParent();
     while(keyPath != null) {
@@ -508,6 +506,15 @@
               keyPath.toString())));
       keyPath = keyPath.getParent();
     }
+    return -1;
   }
 
+  protected String getOpenKey(long id) throws IOException {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, id);
+  }
+
+  protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
+    return new OMKeyCreateRequest(omRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
new file mode 100644
index 0000000..c72bd76
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+
+/**
+ * Tests OMCreateKeyRequestWithFSO class.
+ */
+public class TestOMKeyCreateRequestWithFSO extends TestOMKeyCreateRequest {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set while invoking
+    // OzoneManager#start(), which is not called in this test. Hence the
+    // configuration is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
+  @Override
+  protected void addToKeyTable(String keyName) throws Exception {
+    Path keyPath = Paths.get(keyName);
+    long parentId = checkIntermediatePaths(keyPath);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, fileName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+  }
+
+  @Override
+  protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest,
+      OMRequest omRequest, String keyName) throws Exception {
+    keyName = omKeyCreateRequest.validateAndNormalizeKey(true, keyName);
+    // Check intermediate directories created or not.
+    Path keyPath = Paths.get(keyName);
+    long parentID = checkIntermediatePaths(keyPath);
+
+    // Check open key entry
+    Path keyPathFileName = keyPath.getFileName();
+    Assert.assertNotNull("Failed to find fileName", keyPathFileName);
+    String fileName = keyPathFileName.toString();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            omRequest.getCreateKeyRequest().getClientID());
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    Assert.assertNotNull(omKeyInfo);
+  }
+
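+  // Verifies that every intermediate directory of keyPath exists in the
+  // directory table and returns the objectID of the deepest directory.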
+  @Override
+  protected long checkIntermediatePaths(Path keyPath) throws Exception {
+    // Check intermediate paths are created
+    keyPath = keyPath.getParent(); // skip the file name
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    Assert.assertNotNull("Bucket not found!", omBucketInfo);
+    long lastKnownParentId = omBucketInfo.getObjectID();
+
+    Iterator<Path> elements = keyPath.iterator();
+    StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+    while (elements.hasNext()) {
+      String fileName = elements.next().toString();
+      fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+      fullKeyPath.append(fileName);
+      String dbNodeName = omMetadataManager.getOzonePathKey(
+              lastKnownParentId, fileName);
+      OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+              get(dbNodeName);
+
+      Assert.assertNotNull("Parent key path:" + fullKeyPath +
+              " doesn't exist", omDirInfo);
+      lastKnownParentId = omDirInfo.getObjectID();
+    }
+
+    return lastKnownParentId;
+  }
+
+  @Override
+  protected String getOpenKey(long id) throws IOException {
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    if (omBucketInfo != null) {
+      return omMetadataManager.getOpenFileName(omBucketInfo.getObjectID(),
+              keyName, id);
+    } else {
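+      // Bucket is absent from the DB; fall back to a dummy parent object ID.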
+      return omMetadataManager.getOpenFileName(1000, keyName, id);
+    }
+  }
+
+  @Override
+  protected OMKeyCreateRequest getOMKeyCreateRequest(OMRequest omRequest) {
+    return new OMKeyCreateRequestWithFSO(omRequest);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index b8e5603..b5af354 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -46,27 +46,23 @@
 
   @Test
   public void testValidateAndUpdateCache() throws Exception {
-    OMRequest modifiedOmRequest =
-        doPreExecute(createDeleteKeyRequest());
-
-    OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
-
     // Add volume, bucket and key entries to OM DB.
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
-
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String ozoneKey = addKeyToTable();
 
     OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
 
     // As we added manually to key table.
     Assert.assertNotNull(omKeyInfo);
 
+    OMRequest modifiedOmRequest =
+            doPreExecute(createDeleteKeyRequest());
+
+    OMKeyDeleteRequest omKeyDeleteRequest =
+            getOmKeyDeleteRequest(modifiedOmRequest);
+
     OMClientResponse omClientResponse =
         omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
         100L, ozoneManagerDoubleBufferHelper);
@@ -86,7 +82,7 @@
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     // Add only volume and bucket entry to DB.
     // In actual implementation we don't check for bucket/volume exists
@@ -108,7 +104,7 @@
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     OMClientResponse omClientResponse =
         omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
@@ -124,7 +120,7 @@
         doPreExecute(createDeleteKeyRequest());
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(modifiedOmRequest);
+            getOmKeyDeleteRequest(modifiedOmRequest);
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
 
@@ -145,7 +141,7 @@
   private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
 
     OMKeyDeleteRequest omKeyDeleteRequest =
-        new OMKeyDeleteRequest(originalOmRequest);
+            getOmKeyDeleteRequest(originalOmRequest);
 
     OMRequest modifiedOmRequest = omKeyDeleteRequest.preExecute(ozoneManager);
 
@@ -170,4 +166,18 @@
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
         .setClientId(UUID.randomUUID().toString()).build();
   }
+
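+  /**
+   * Adds a key to the key table using the simple layout and returns its
+   * DB key; the FSO variant overrides this to write a file table entry.
+   */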
+  protected String addKeyToTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(false, volumeName,
+            bucketName, keyName, clientID, replicationType, replicationFactor,
+            omMetadataManager);
+
+    return omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+  }
+
+  protected OMKeyDeleteRequest getOmKeyDeleteRequest(
+      OMRequest modifiedOmRequest) {
+    return new OMKeyDeleteRequest(modifiedOmRequest);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
new file mode 100644
index 0000000..3686b6a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Tests OmKeyDelete request with prefix layout.
+ */
+public class TestOMKeyDeleteRequestWithFSO extends TestOMKeyDeleteRequest {
+
+  @Override
+  protected OMKeyDeleteRequest getOmKeyDeleteRequest(
+      OMRequest modifiedOmRequest) {
+    return new OMKeyDeleteRequestWithFSO(modifiedOmRequest);
+  }
+
+  @Override
+  protected String addKeyToTable() throws Exception {
+    String parentDir = "c/d/e";
+    String fileName = "file1";
+    String key = parentDir + "/" + fileName;
+    keyName = key; // updated key name
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, parentDir, omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    omKeyInfo.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            fileName, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set while invoking
+    // OzoneManager#start(), which is not called in this test. Hence the
+    // configuration is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
+  @Test
+  public void testOzonePrefixPathViewer() throws Exception {
+    // Add volume, bucket and key entries to OM DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager);
+
+    String ozoneKey = addKeyToTable();
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
+
+    // The key must exist, as it was added manually to the key table.
+    Assert.assertNotNull(omKeyInfo);
+
+    // OzonePrefixPathImpl on a directory
+    OzonePrefixPathImpl ozonePrefixPath = new OzonePrefixPathImpl(volumeName,
+        bucketName, "c", keyManager);
+    OzoneFileStatus status = ozonePrefixPath.getOzoneFileStatus();
+    Assert.assertNotNull(status);
+    Assert.assertEquals("c", status.getTrimmedName());
+    Assert.assertTrue(status.isDirectory());
+    verifyPath(ozonePrefixPath, "c", "c/d");
+    verifyPath(ozonePrefixPath, "c/d", "c/d/e");
+    verifyPath(ozonePrefixPath, "c/d/e", "c/d/e/file1");
+
+    try {
+      ozonePrefixPath.getChildren("c/d/e/file1");
+      Assert.fail("Should throw INVALID_KEY_NAME as the given path is a file.");
+    } catch (OMException ome) {
+      Assert.assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+          ome.getResult());
+    }
+
+    // OzonePrefixPathImpl on a file
+    ozonePrefixPath = new OzonePrefixPathImpl(volumeName,
+        bucketName, "c/d/e/file1", keyManager);
+    status = ozonePrefixPath.getOzoneFileStatus();
+    Assert.assertNotNull(status);
+    Assert.assertEquals("c/d/e/file1", status.getTrimmedName());
+    Assert.assertEquals("c/d/e/file1", status.getKeyInfo().getKeyName());
+    Assert.assertTrue(status.isFile());
+  }
+
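+  // Expects exactly one child under pathName and verifies its trimmed name.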
+  private void verifyPath(OzonePrefixPath ozonePrefixPath, String pathName,
+                          String expectedPath)
+      throws IOException {
+    Iterator<? extends OzoneFileStatus> pathItr = ozonePrefixPath.getChildren(
+        pathName);
+    Assert.assertTrue("Failed to list keyPaths", pathItr.hasNext());
+    Assert.assertEquals(expectedPath, pathItr.next().getTrimmedName());
+    try {
+      pathItr.next();
+      Assert.fail("Expected exactly one element in the list!");
+    } catch (NoSuchElementException nse) {
+      // expected, the iterator is exhausted
+    }
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 861098a..22f8fa2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -27,10 +27,13 @@
 import org.apache.hadoop.ozone.om.ResolvedBucket;
 import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.KeyManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.rules.TemporaryFolder;
@@ -93,6 +96,7 @@
   protected long scmBlockSize = 1000L;
   protected long dataSize;
   protected Random random;
+  protected long txnLogId = 100000L;
 
   // Just setting ozoneManagerDoubleBuffer which does nothing.
   protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
@@ -105,7 +109,7 @@
   public void setup() throws Exception {
     ozoneManager = Mockito.mock(OzoneManager.class);
     omMetrics = OMMetrics.create();
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -174,6 +178,34 @@
         .thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket));
   }
 
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
+
+  /**
+   * Verify path in open key table. Also, it returns OMKeyInfo for the given
+   * key path.
+   *
+   * @param key      key name
+   * @param id       client id
+   * @param doAssert if true then do assertion, otherwise it just skip.
+   * @return om key info for the given key path.
+   * @throws Exception DB failure
+   */
+  protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+                                               boolean doAssert)
+          throws Exception {
+    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+            key, id);
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+    if (doAssert) {
+      Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+    }
+    return omKeyInfo;
+  }
+
   @After
   public void stop() {
     omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
index 088b232..01561e9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
@@ -56,7 +56,7 @@
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -65,8 +65,8 @@
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        modifiedRequest.getInitiateMultiPartUploadRequest()
             .getKeyArgs().getMultipartUploadID());
 
     Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
@@ -102,7 +102,7 @@
         volumeName, bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -111,8 +111,8 @@
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        modifiedRequest.getInitiateMultiPartUploadRequest()
             .getKeyArgs().getMultipartUploadID());
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
@@ -130,7 +130,7 @@
         keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(modifiedRequest);
+        getS3InitiateMultipartUploadReq(modifiedRequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -139,12 +139,18 @@
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest()
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+        modifiedRequest.getInitiateMultiPartUploadRequest()
             .getKeyArgs().getMultipartUploadID());
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
     Assert.assertNull(omMetadataManager.getMultipartInfoTable()
         .get(multipartKey));
   }
+
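+  /**
+   * Builds the multipart key from the full key path; extracted as a
+   * protected helper so layout-specific tests can override it.
+   */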
+  protected String getMultipartKey(String volumeName, String bucketName,
+                                   String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
new file mode 100644
index 0000000..0e41e7c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests S3 Initiate Multipart Upload request with FSO (prefix) layout.
+ */
+public class TestS3InitiateMultipartUploadRequestWithFSO
+    extends TestS3InitiateMultipartUploadRequest {
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String prefix = "a/b/c/";
+    List<String> dirs = new ArrayList<>();
+    dirs.add("a");
+    dirs.add("b");
+    dirs.add("c");
+    String fileName = UUID.randomUUID().toString();
+    String keyName = prefix + fileName;
+
+    // Add volume and bucket to DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+        omMetadataManager);
+
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+    long bucketID = omBucketInfo.getObjectID();
+
+    OMRequest modifiedRequest = doPreExecuteInitiateMPUWithFSO(volumeName,
+        bucketName, keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadReqFSO =
+        getS3InitiateMultipartUploadReq(modifiedRequest);
+
+    OMClientResponse omClientResponse =
+            s3InitiateMultipartUploadReqFSO.validateAndUpdateCache(
+                    ozoneManager, 100L,
+                    ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+            omClientResponse.getOMResponse().getStatus());
+
+    long parentID = verifyDirectoriesInDB(dirs, bucketID);
+
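+    // The FSO layout keeps two keys for the same upload: multipartInfoTable
+    // is still keyed by volume/bucket/key/uploadID, while the open file
+    // entry is keyed by parentID/fileName/uploadID.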
+    String multipartFileKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName,
+            modifiedRequest.getInitiateMultiPartUploadRequest().getKeyArgs()
+                .getMultipartUploadID());
+
+    String multipartOpenFileKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, modifiedRequest.getInitiateMultiPartUploadRequest()
+                    .getKeyArgs().getMultipartUploadID());
+
+    OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+            .get(multipartOpenFileKey);
+    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
+    Assert.assertEquals("FileName mismatches!", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omKeyInfo.getParentObjectID());
+
+    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
+            .getMultipartInfoTable().get(multipartFileKey);
+    Assert.assertNotNull("Failed to find the multipartFileInfo",
+            omMultipartKeyInfo);
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omMultipartKeyInfo.getParentID());
+
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID(),
+        omMultipartKeyInfo
+            .getUploadID());
+
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+        .getKeyArgs().getModificationTime(),
+        omKeyInfo
+        .getModificationTime());
+    Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime(),
+        omKeyInfo
+            .getCreationTime());
+  }
+
+  private long verifyDirectoriesInDB(List<String> dirs, long bucketID)
+      throws IOException {
+    // bucketID is the parent
+    long parentID = bucketID;
+    for (int indx = 0; indx < dirs.size(); indx++) {
+      String dirName = dirs.get(indx);
+      // for index=0, parentID is the bucketID
+      String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+      OmDirectoryInfo omDirInfo =
+              omMetadataManager.getDirectoryTable().get(dbKey);
+      Assert.assertNotNull("Directory is missing in DB!", omDirInfo);
+      Assert.assertEquals("Directory name mismatch!", dirName,
+              omDirInfo.getName());
+      Assert.assertEquals("Invalid dir path!",
+              parentID + "/" + dirName, omDirInfo.getPath());
+      parentID = omDirInfo.getObjectID();
+    }
+    return parentID;
+  }
+
+  @Override
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+      OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
index f2c5b66..5fe2c0d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java
@@ -115,7 +115,7 @@
             keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(omRequest);
+        getS3InitiateMultipartUploadReq(omRequest);
 
     OMRequest modifiedRequest =
         s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
@@ -152,8 +152,7 @@
         TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName,
             keyName, clientID, dataSize, multipartUploadID, partNumber);
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(omRequest);
-
+            getS3MultipartUploadCommitReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadCommitPartRequest.preExecute(ozoneManager);
@@ -184,7 +183,7 @@
 
 
     S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(omRequest);
+        getS3MultipartUploadAbortReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadAbortRequest.preExecute(ozoneManager);
@@ -205,7 +204,7 @@
             keyName, multipartUploadID, partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(omRequest);
+            getS3MultipartUploadCompleteReq(omRequest);
 
     OMRequest modifiedRequest =
         s3MultipartUploadCompleteRequest.preExecute(ozoneManager);
@@ -218,4 +217,55 @@
   }
 
 
+  /**
+   * Performs preExecute of the Initiate Multipart Upload request with the
+   * FSO (prefix) layout for the given volume, bucket and key name.
+   * @param volumeName volume name
+   * @param bucketName bucket name
+   * @param keyName key name
+   * @return OMRequest - returned from preExecute.
+   */
+  protected OMRequest doPreExecuteInitiateMPUWithFSO(
+      String volumeName, String bucketName, String keyName) throws Exception {
+    OMRequest omRequest =
+            TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+                    keyName);
+
+    S3InitiateMultipartUploadRequestWithFSO
+        s3InitiateMultipartUploadRequestWithFSO =
+            new S3InitiateMultipartUploadRequestWithFSO(omRequest);
+
+    OMRequest modifiedRequest =
+            s3InitiateMultipartUploadRequestWithFSO.preExecute(ozoneManager);
+
+    Assert.assertNotEquals(omRequest, modifiedRequest);
+    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime() > 0);
+
+    return modifiedRequest;
+  }
+
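+  // Factory hooks for the request objects under test. The *WithFSO test
+  // subclasses override these to return the prefix-layout variants, so the
+  // same test bodies exercise both metadata layouts.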
+  protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCompleteRequest(omRequest);
+  }
+
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequest(omRequest);
+  }
+
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequest(initiateMPURequest);
+  }
+
+  protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq(
+      OMRequest omRequest) {
+    return new S3MultipartUploadAbortRequest(omRequest);
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
index d0b61c7..d6ded0a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java
@@ -50,16 +50,18 @@
   public void testValidateAndUpdateCache() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
+
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+        getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -73,22 +75,25 @@
             multipartUploadID);
 
     S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
+        getS3MultipartUploadAbortReq(abortMPURequest);
 
     omClientResponse =
         s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
             ozoneManagerDoubleBufferHelper);
 
-
     String multipartKey = omMetadataManager.getMultipartKey(volumeName,
         bucketName, keyName, multipartUploadID);
 
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
+
     // Check table and response.
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
-    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+        omMetadataManager.getOpenKeyTable().get(multipartOpenKey));
 
   }
 
@@ -108,7 +113,7 @@
             multipartUploadID);
 
     S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest =
-        new S3MultipartUploadAbortRequest(abortMPURequest);
+        getS3MultipartUploadAbortReq(abortMPURequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L,
@@ -177,4 +182,19 @@
         omClientResponse.getOMResponse().getStatus());
 
   }
+
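+  // Overridable hooks: the FSO subclass returns a key under a directory
+  // hierarchy, pre-creates the parent directories and derives the open-key
+  // name from the parent object ID instead of the full key path.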
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // no parent hierarchy to create in the simple (non-prefix) layout
+  }
+
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java
new file mode 100644
index 0000000..044f8a6
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart upload abort request with FSO (prefix) layout.
+ */
+public class TestS3MultipartUploadAbortRequestWithFSO
+    extends TestS3MultipartUploadAbortRequest {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Override
+  protected S3MultipartUploadAbortRequest getS3MultipartUploadAbortReq(
+      OMRequest omRequest) {
+    return new S3MultipartUploadAbortRequestWithFSO(omRequest);
+  }
+
+  @Override
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+      OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
+  }
+
+  @Override
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  @Override
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+        dirName, omMetadataManager);
+  }
+
+  @Override
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+        multipartUploadID);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
index d623b17..a285ba7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java
@@ -41,27 +41,28 @@
   public void testPreExecute() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     doPreExecuteCommitMPU(volumeName, bucketName, keyName, Time.now(),
         UUID.randomUUID().toString(), 1);
   }
 
-
   @Test
   public void testValidateAndUpdateCacheSuccess() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
+
     OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+            getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -75,12 +76,10 @@
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
 
     omClientResponse =
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -90,6 +89,9 @@
     Assert.assertTrue(omClientResponse.getOMResponse().getStatus()
         == OzoneManagerProtocolProtos.Status.OK);
 
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
+
     String multipartKey = omMetadataManager.getMultipartKey(volumeName,
         bucketName, keyName, multipartUploadID);
 
@@ -97,21 +99,23 @@
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
     Assert.assertTrue(omMetadataManager.getMultipartInfoTable()
         .get(multipartKey).getPartKeyInfoMap().size() == 1);
-    Assert.assertNull(omMetadataManager.getOpenKeyTable()
-        .get(omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
-            clientID)));
+    Assert.assertNotNull(omMetadataManager.getOpenKeyTable()
+        .get(multipartOpenKey));
 
+    String partKey = getOpenKey(volumeName, bucketName, keyName, clientID);
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(partKey));
   }
 
   @Test
   public void testValidateAndUpdateCacheMultipartNotFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
 
+    createParentPath(volumeName, bucketName);
 
     long clientID = Time.now();
     String multipartUploadID = UUID.randomUUID().toString();
@@ -120,12 +124,10 @@
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToOpenKeyTable(volumeName, bucketName, keyName, clientID);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
@@ -147,7 +149,7 @@
   public void testValidateAndUpdateCacheKeyNotFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -163,7 +165,7 @@
     // part. It will fail with KEY_NOT_FOUND
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
 
     OMClientResponse omClientResponse =
@@ -180,7 +182,7 @@
   public void testValidateAndUpdateCacheBucketFound() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
 
@@ -195,7 +197,7 @@
     // part. It will fail with KEY_NOT_FOUND
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
 
     OMClientResponse omClientResponse =
@@ -206,4 +208,32 @@
         == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND);
 
   }
+
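+  // Overridable hooks for the FSO subclass, which stores open keys under
+  // parentID/fileName instead of the full volume/bucket/key path.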
+  protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+  }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+  }
+
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, long clientID) {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+        keyName, clientID);
+  }
+
+  protected void createParentPath(String volumeName, String bucketName)
+          throws Exception {
+    // no parent hierarchy to create in the simple (non-prefix) layout
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
new file mode 100644
index 0000000..bd7f431
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart upload commit part request with FSO (prefix) layout.
+ */
+public class TestS3MultipartUploadCommitPartRequestWithFSO
+    extends TestS3MultipartUploadCommitPartRequest {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Override
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequestWithFSO(omRequest);
+  }
+
+  @Override
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
+  }
+
+  @Override
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
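+  // Seeds the open file entry the way the FSO commit-part path expects:
+  // keyed by parentID and file name rather than by the full key name.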
+  @Override
+  protected void addKeyToOpenKeyTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    long txnLogId = 10000;
+    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID,
+            txnLogId, Time.now());
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfo, clientID, txnLogId, omMetadataManager);
+  }
+
+  @Override
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+  }
+
+  @Override
+  protected String getOpenKey(String volumeName, String bucketName,
+      String keyName, long clientID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getOpenFileName(parentID, fileName, clientID);
+  }
+
+  @Override
+  protected OMRequest doPreExecuteInitiateMPU(String volumeName,
+      String bucketName, String keyName) throws Exception {
+    OMRequest omRequest =
+            TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName,
+                    keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+            new S3InitiateMultipartUploadRequestWithFSO(omRequest);
+
+    OMRequest modifiedRequest =
+            s3InitiateMultipartUploadRequest.preExecute(ozoneManager);
+
+    Assert.assertNotEquals(omRequest, modifiedRequest);
+    Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest());
+    Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getMultipartUploadID());
+    Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest()
+            .getKeyArgs().getModificationTime() > 0);
+
+    return modifiedRequest;
+  }
+
+  @Override
+  protected void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
index a04f51f..3d399b1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
@@ -55,7 +56,7 @@
   public void testValidateAndUpdateCacheSuccess() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
 
     TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
         omMetadataManager);
@@ -64,7 +65,7 @@
         bucketName, keyName);
 
     S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
-        new S3InitiateMultipartUploadRequest(initiateMPURequest);
+        getS3InitiateMultipartUploadReq(initiateMPURequest);
 
     OMClientResponse omClientResponse =
         s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager,
@@ -78,27 +79,25 @@
         bucketName, keyName, clientID, multipartUploadID, 1);
 
     S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
-        new S3MultipartUploadCommitPartRequest(commitMultipartRequest);
+        getS3MultipartUploadCommitReq(commitMultipartRequest);
 
     // Add key to open key table.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
-        keyName, clientID, HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+    addKeyToTable(volumeName, bucketName, keyName, clientID);
 
     s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
         2L, ozoneManagerDoubleBufferHelper);
 
     List<Part> partList = new ArrayList<>();
 
-    partList.add(Part.newBuilder().setPartName(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName) +
-            clientID).setPartNumber(1).build());
+    String partName = getPartName(volumeName, bucketName, keyName, clientID);
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+            .build());
 
     OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
         bucketName, keyName, multipartUploadID, partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+        getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -107,14 +106,71 @@
     Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
         omClientResponse.getOMResponse().getStatus());
 
-    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
-        bucketName, keyName, multipartUploadID);
+    String multipartKey = getMultipartKey(volumeName, bucketName, keyName,
+            multipartUploadID);
 
     Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
     Assert.assertNull(
         omMetadataManager.getMultipartInfoTable().get(multipartKey));
     Assert.assertNotNull(omMetadataManager.getKeyTable().get(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+            getOzoneDBKey(volumeName, bucketName, keyName)));
+  }
+
+  @Test
+  public void testInvalidPartOrderError() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName,
+            bucketName, keyName);
+
+    S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest =
+            getS3InitiateMultipartUploadReq(initiateMPURequest);
+
+    OMClientResponse omClientResponse =
+            s3InitiateMultipartUploadRequest.validateAndUpdateCache(
+                    ozoneManager, 1L, ozoneManagerDoubleBufferHelper);
+
+    long clientID = Time.now();
+    String multipartUploadID = omClientResponse.getOMResponse()
+            .getInitiateMultiPartUploadResponse().getMultipartUploadID();
+
+    OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName,
+            bucketName, keyName, clientID, multipartUploadID, 1);
+
+    S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest =
+            getS3MultipartUploadCommitReq(commitMultipartRequest);
+
+    // Add key to open key table.
+    addKeyToTable(volumeName, bucketName, keyName, clientID);
+
+    s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager,
+            2L, ozoneManagerDoubleBufferHelper);
+
+    List<Part> partList = new ArrayList<>();
+
+    String partName = getPartName(volumeName, bucketName, keyName, clientID);
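+    // Add the parts out of order (part 23 before part 1) so that the
+    // complete request fails with INVALID_PART_ORDER.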
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23)
+            .build());
+    partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1)
+            .build());
+
+    OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName,
+            bucketName, keyName, multipartUploadID, partList);
+
+    S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
+
+    omClientResponse =
+            s3MultipartUploadCompleteRequest.validateAndUpdateCache(
+                    ozoneManager, 3L, ozoneManagerDoubleBufferHelper);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_PART_ORDER,
+            omClientResponse.getOMResponse().getStatus());
   }
 
   @Test
@@ -129,7 +185,7 @@
         bucketName, keyName, UUID.randomUUID().toString(), partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+        getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -153,7 +209,7 @@
         bucketName, keyName, UUID.randomUUID().toString(), partList);
 
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -180,7 +236,7 @@
 
     // Doing complete multipart upload request without initiate.
     S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest =
-        new S3MultipartUploadCompleteRequest(completeMultipartRequest);
+            getS3MultipartUploadCompleteReq(completeMultipartRequest);
 
     OMClientResponse omClientResponse =
         s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager,
@@ -191,5 +247,35 @@
         omClientResponse.getOMResponse().getStatus());
 
   }
+
+  protected void addKeyToTable(String volumeName, String bucketName,
+                             String keyName, long clientID) throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName,
+            keyName, clientID, HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+  }
+
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) throws IOException {
+    return omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+  }
+
+  private String getPartName(String volumeName, String bucketName,
+      String keyName, long clientID) throws IOException {
+
+    String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+    return dbOzoneKey + clientID;
+  }
+
+  protected String getOzoneDBKey(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
+  }
+
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
new file mode 100644
index 0000000..17b4d9d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.multipart;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.UUID;
+
+/**
+ * Tests S3 Multipart Upload Complete request with FSO (prefix) layout.
+ */
+public class TestS3MultipartUploadCompleteRequestWithFSO
+    extends TestS3MultipartUploadCompleteRequest {
+
+  @BeforeClass
+  public static void init() {
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+  }
+
+  @Override
+  protected String getKeyName() {
+    String parentDir = UUID.randomUUID().toString() + "/a/b/c";
+    String fileName = "file1";
+    return parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+  }
+
+  @Override
+  protected void addKeyToTable(String volumeName, String bucketName,
+      String keyName, long clientID) throws Exception {
+    // need to initialize parentID
+    String parentDir = OzoneFSUtils.getParentDir(keyName);
+    Assert.assertNotEquals("Parent doesn't exists!", parentDir, keyName);
+
+    // add parentDir to dirTable
+    long parentID = getParentID(volumeName, bucketName, keyName);
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoFSO =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    omKeyInfoFSO.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoFSO, clientID, omKeyInfoFSO.getObjectID(),
+            omMetadataManager);
+  }
+
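+  // Resolves the committed file through the FSO tables first, then builds
+  // the multipart key from its parent object ID and trimmed file name.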
+  @Override
+  protected String getMultipartKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) throws IOException {
+    OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists(
+            omMetadataManager, volumeName,
+            bucketName, keyName, 0);
+
+    Assert.assertNotNull("key not found in DB!", keyStatus);
+
+    return omMetadataManager.getMultipartKey(keyStatus.getKeyInfo()
+                    .getParentObjectID(), keyStatus.getTrimmedName(),
+            multipartUploadID);
+  }
+
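+  // Walks the key path element by element, starting from the bucket's
+  // object ID, to resolve the object ID of the immediate parent directory.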
+  private long getParentID(String volumeName, String bucketName,
+                           String keyName) throws IOException {
+    Path keyPath = Paths.get(keyName);
+    Iterator<Path> elements = keyPath.iterator();
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+
+    return OMFileRequest.getParentID(omBucketInfo.getObjectID(),
+            elements, keyName, omMetadataManager);
+  }
+
+  @Override
+  protected String getOzoneDBKey(String volumeName, String bucketName,
+                                 String keyName) throws IOException {
+    long parentID = getParentID(volumeName, bucketName, keyName);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getOzonePathKey(parentID, fileName);
+  }
+
+  @Override
+  protected S3MultipartUploadCompleteRequest getS3MultipartUploadCompleteReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCompleteRequestWithFSO(omRequest);
+  }
+
+  @Override
+  protected S3MultipartUploadCommitPartRequest getS3MultipartUploadCommitReq(
+          OMRequest omRequest) {
+    return new S3MultipartUploadCommitPartRequestWithFSO(omRequest);
+  }
+
+  @Override
+  protected S3InitiateMultipartUploadRequest getS3InitiateMultipartUploadReq(
+          OMRequest initiateMPURequest) {
+    return new S3InitiateMultipartUploadRequestWithFSO(initiateMPURequest);
+  }
+
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java
new file mode 100644
index 0000000..628ac65
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests OMDirectoryCreateResponseWithFSO - prefix layout.
+ */
+public class TestOMDirectoryCreateResponseWithFSO {
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OMMetadataManager omMetadataManager;
+  private BatchOperation batchOperation;
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    batchOperation = omMetadataManager.getStore().initBatchOperation();
+  }
+
+  @Test
+  public void testAddToDBBatch() throws Exception {
+
+    String keyName = UUID.randomUUID().toString();
+
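+    // parentID/objectID are arbitrary values for this unit test; the
+    // response only needs them to form the dirTable key parentID/keyName.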
+    long parentID = 100;
+    OmDirectoryInfo omDirInfo =
+            TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID);
+
+    OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
+        OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+            .build();
+
+    OMDirectoryCreateResponseWithFSO omDirectoryCreateResponseWithFSO =
+        new OMDirectoryCreateResponseWithFSO(omResponse, omDirInfo,
+            new ArrayList<>(), OMDirectoryCreateRequestWithFSO.Result.SUCCESS);
+
+    omDirectoryCreateResponseWithFSO
+        .addToDBBatch(omMetadataManager, batchOperation);
+
+    // Commit the batch manually and verify that addToDBBatch succeeded.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNotNull(omMetadataManager.getDirectoryTable().get(
+            omMetadataManager.getOzonePathKey(parentID, keyName)));
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
new file mode 100644
index 0000000..4697079
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.util.ArrayList;
+
+/**
+ * Tests OMFileCreateResponseWithFSO - prefix layout.
+ */
+public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse {
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
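+  // In the FSO layout the open key is addressed by the parent's object ID
+  // (here the bucket) and the client ID, not by the full key path.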
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMFileCreateResponseWithFSO(response, keyInfo,
+        new ArrayList<>(), clientID, bucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set when OzoneManager#start()
+    // is invoked, which doesn't happen in this test. Hence the configuration
+    // is set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
index 602ec99..33c16ae 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -37,8 +38,7 @@
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
+    OmKeyInfo omKeyInfo = createOmKeyInfo();
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
@@ -50,11 +50,9 @@
         .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omBucketInfo);
+            getOmAllocateBlockResponse(omKeyInfo, omBucketInfo, omResponse);
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKey();
 
     // Not adding key entry before to test whether commit is successful or not.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
@@ -68,8 +66,7 @@
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
+    OmKeyInfo omKeyInfo = createOmKeyInfo();
     OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
@@ -81,12 +78,10 @@
         .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock)
         .build();
     OMAllocateBlockResponse omAllocateBlockResponse =
-        new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
-            omBucketInfo);
+            getOmAllocateBlockResponse(omKeyInfo, omBucketInfo, omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKey();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omAllocateBlockResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -98,4 +93,22 @@
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
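+  // Overridable hooks; the FSO subclass swaps in a parentID-based open-file
+  // key and the OMAllocateBlockResponseWithFSO response type.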
+  protected OmKeyInfo createOmKeyInfo() throws Exception {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            bucketName, keyName, replicationType, replicationFactor);
+  }
+
+  protected String getOpenKey() throws Exception {
+    return omMetadataManager.getOpenKey(volumeName, bucketName,
+            keyName, clientID);
+  }
+
+  @NotNull
+  protected OMAllocateBlockResponse getOmAllocateBlockResponse(
+          OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
+          OMResponse omResponse) {
+    return new OMAllocateBlockResponse(omResponse, omKeyInfo, clientID,
+            omBucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
new file mode 100644
index 0000000..4b99d76
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Tests OMAllocateBlockResponse - prefix layout.
+ */
+public class TestOMAllocateBlockResponseWithFSO
+        extends TestOMAllocateBlockResponse {
+
+  // dummy parent object ID; this entry doesn't actually exist in dirTable
+  private long parentID = 10;
+  private String fileName = "file1";
+
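+  // Builds the key info with an explicit parentID/objectID so the FSO
+  // response writes it under the openFileTable key
+  // (parentID/fileName/clientID).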
+  @Override
+  protected OmKeyInfo createOmKeyInfo() throws Exception {
+    // nest the file under a parent dir so the fixed parentID applies
+    String parentDir = keyName;
+    keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName;
+
+    long txnId = 50;
+    long objectId = parentID + 1;
+
+    OmKeyInfo omKeyInfoFSO =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+    return omKeyInfoFSO;
+  }
+
+  @Override
+  protected String getOpenKey() throws Exception {
+    return omMetadataManager.getOpenFileName(
+            parentID, fileName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected OMAllocateBlockResponse getOmAllocateBlockResponse(
+          OmKeyInfo omKeyInfo, OmBucketInfo omBucketInfo,
+          OMResponse omResponse) {
+    return new OMAllocateBlockResponseWithFSO(omResponse, omKeyInfo, clientID,
+            omBucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set when OzoneManager#start()
+    // is invoked, which does not happen in this test. Hence the flag is
+    // set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
+}
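
The override above swaps getOpenKey for getOpenFileName: in the prefix layout an open file is addressed by its parent directory's object ID and file name rather than by the full volume/bucket/key path. A rough sketch of the contrast; the exact delimiters and encoding live in OmMetadataManagerImpl, so the formats below are assumptions for illustration only.

// Contrast of the two open-key shapes (delimiters assumed, not the
// authoritative format from OmMetadataManagerImpl).
public final class OpenKeyShapes {

  // SIMPLE layout: the whole path is part of the key.
  static String simpleOpenKey(String vol, String bucket, String key,
      long clientId) {
    return "/" + vol + "/" + bucket + "/" + key + "/" + clientId;
  }

  // Prefix (FSO) layout: the key hangs off the parent's object ID, so
  // ancestor renames do not rewrite this row.
  static String fsoOpenFileName(long parentId, String fileName,
      long clientId) {
    return parentId + "/" + fileName + "/" + clientId;
  }

  public static void main(String[] args) {
    System.out.println(simpleOpenKey("vol", "bucket", "dir1/file1", 100L));
    System.out.println(fsoOpenFileName(1027L, "file1", 100L));
  }
}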
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index 2d63ebd..e2a223c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -30,17 +31,18 @@
 /**
  * Tests OMKeyCommitResponse.
  */
+@SuppressWarnings("visibilitymodifier")
 public class TestOMKeyCommitResponse extends TestOMKeyResponse {
 
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
             OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
@@ -50,17 +52,14 @@
 
     // During key commit, the entry will already exist in the openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omBucketInfo);
+    String ozoneKey = getOzoneKey();
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omKeyInfo, omResponse, openKey, ozoneKey);
 
     omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -69,8 +68,7 @@
 
     // On commit, the key moves from the openKey table to the keyTable.
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
   }
 
   @Test
@@ -78,7 +76,7 @@
 
     OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
         bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
@@ -89,18 +87,15 @@
             .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
             .build();
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
+    String openKey = getOpenKeyName();
+    String ozoneKey = getOzoneKey();
 
-    OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
-        omResponse, omKeyInfo, ozoneKey, openKey, omBucketInfo);
+    OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+            omKeyInfo, omResponse, openKey, ozoneKey);
 
     // During key commit, the entry will already exist in the openKeyTable.
     // Adding it here.
-    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    addKeyToOpenKeyTable();
 
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
@@ -113,7 +108,28 @@
     // As omResponse is an error, this is a no-op. So, the entry should
     // still be in the openKey table.
     Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
-    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(
-        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+    Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
+  }
+
+  protected void addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+  }
+
+  @NotNull
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzoneKey(volumeName,
+            omBucketInfo.getBucketName(), keyName);
+  }
+
+  @NotNull
+  protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+          String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponse(omResponse, omKeyInfo, ozoneKey, openKey,
+            omBucketInfo);
   }
 }
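
Both testAddToDBBatch variants assert the same invariant: a successful commit removes the open-key entry and creates the key-table entry within one batch, while an error response touches neither table. A minimal in-memory sketch of that invariant, with plain Maps standing in for the RocksDB-backed tables:

import java.util.HashMap;
import java.util.Map;

// In-memory sketch of the commit invariant the tests above assert.
public class CommitFlowSketch {
  private final Map<String, String> openKeyTable = new HashMap<>();
  private final Map<String, String> keyTable = new HashMap<>();

  void applyCommit(boolean ok, String openKey, String ozoneKey) {
    if (!ok) {
      return;                       // error response: no-op on both tables
    }
    String info = openKeyTable.remove(openKey);
    keyTable.put(ozoneKey, info);   // committed entry becomes visible
  }

  public static void main(String[] args) {
    CommitFlowSketch db = new CommitFlowSketch();
    db.openKeyTable.put("/vol/bucket/key/1", "keyInfo");
    db.applyCommit(false, "/vol/bucket/key/1", "/vol/bucket/key");
    assert db.openKeyTable.containsKey("/vol/bucket/key/1"); // unchanged
    db.applyCommit(true, "/vol/bucket/key/1", "/vol/bucket/key");
    assert db.keyTable.containsKey("/vol/bucket/key");       // committed
  }
}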
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
new file mode 100644
index 0000000..3069e2f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyCommitResponse - prefix layout.
+ */
+public class TestOMKeyCommitResponseWithFSO extends TestOMKeyCommitResponse {
+
+  @NotNull
+  @Override
+  protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+      String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponseWithFSO(omResponse, omKeyInfo, ozoneKey,
+        openKey, omBucketInfo);
+  }
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @Override
+  protected void addKeyToOpenKeyTable() throws Exception {
+    Assert.assertNotNull(omBucketInfo);
+    long parentID = omBucketInfo.getObjectID();
+    long objectId = parentID + 10;
+
+    OmKeyInfo omKeyInfoFSO =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+                    Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager);
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected String getOzoneKey() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
+            keyName);
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set when OzoneManager#start()
+    // is invoked, which does not happen in this test. Hence the flag is
+    // set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+}
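
getOzonePathKey(parentObjectID, keyName) is what makes directory rename and delete cheap in the prefix layout: children reference their parent's immutable object ID, so renaming a directory rewrites a single row. A toy sketch of that property, with invented table shapes:

import java.util.HashMap;
import java.util.Map;

// Toy model of the prefix layout's rename property: file rows are keyed
// by "parentObjectId/name", so a directory rename is a one-row update.
public class RenameSketch {
  // objectId -> directory name (stand-in for dirTable)
  private final Map<Long, String> dirTable = new HashMap<>();
  // "parentObjectId/fileName" -> key info (stand-in for fileTable)
  private final Map<String, String> fileTable = new HashMap<>();

  public static void main(String[] args) {
    RenameSketch db = new RenameSketch();
    db.dirTable.put(1027L, "a/b/c");
    db.fileTable.put("1027/file1", "keyInfo");

    // Rename the directory: only the directory row changes.
    db.dirTable.put(1027L, "a/b/renamed");

    // The child row is untouched because it references the object ID.
    assert db.fileTable.containsKey("1027/file1");
  }
}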
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 4bef2ef..7566afb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -20,11 +20,11 @@
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateKeyResponse;
@@ -40,13 +40,12 @@
   @Test
   public void testAddToDBBatch() throws Exception {
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
                 CreateKeyResponse.getDefaultInstance())
             .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -54,11 +53,11 @@
             .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omBucketInfo,
+                    omResponse);
 
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
+
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
     omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -70,13 +69,13 @@
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
 
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+    omBucketInfo = OmBucketInfo.newBuilder()
         .setVolumeName(volumeName).setBucketName(bucketName)
         .setCreationTime(Time.now()).build();
 
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
+
     OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
         CreateKeyResponse.getDefaultInstance())
         .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
@@ -84,12 +83,11 @@
         .build();
 
     OMKeyCreateResponse omKeyCreateResponse =
-        new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
-            omBucketInfo);
+            getOmKeyCreateResponse(omKeyInfo, omBucketInfo,
+                    omResponse);
 
     // Before calling addToDBBatch
-    String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
-        keyName, clientID);
+    String openKey = getOpenKeyName();
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
     omKeyCreateResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -101,4 +99,12 @@
     Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
 
   }
+
+  @NotNull
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponse(response, keyInfo, null, clientID,
+            bucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
new file mode 100644
index 0000000..59cdcd3
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.util.ArrayList;
+
+/**
+ * Tests OMKeyCreateResponseWithFSO.
+ */
+public class TestOMKeyCreateResponseWithFSO extends TestOMKeyCreateResponse {
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set when OzoneManager#start()
+    // is invoked, which does not happen in this test. Hence the flag is
+    // set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+
+  @NotNull
+  @Override
+  protected String getOpenKeyName() {
+    Assert.assertNotNull(omBucketInfo);
+    return omMetadataManager.getOpenFileName(
+            omBucketInfo.getObjectID(), keyName, clientID);
+  }
+
+  @NotNull
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(omBucketInfo);
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            omBucketInfo.getBucketName(), keyName, replicationType,
+            replicationFactor,
+            omBucketInfo.getObjectID() + 1,
+            omBucketInfo.getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, OMResponse response) {
+
+    return new OMKeyCreateResponseWithFSO(response, keyInfo, new ArrayList<>(),
+        clientID, bucketInfo);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index 97ade26..8306ee6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -40,14 +40,15 @@
  */
 public class TestOMKeyDeleteResponse extends TestOMKeyResponse {
 
+  private OmBucketInfo omBucketInfo;
+
   @Test
   public void testAddToDBBatch() throws Exception {
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -56,14 +57,10 @@
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    String ozoneKey = addKeyToTable();
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -73,20 +70,22 @@
 
     Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
+    String deletedKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+        keyName);
+
     // As the default key entry does not have any blocks, it should not
     // be in the deletedKeyTable.
     Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
-        ozoneKey));
+        deletedKey));
   }
 
   @Test
   public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
 
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     // Add block to key.
     List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
@@ -108,10 +107,7 @@
 
     omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+    String ozoneKey = addKeyToTable();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -120,8 +116,8 @@
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -131,20 +127,20 @@
 
     Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
+    String deletedKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+        keyName);
+
     // Key has blocks, so it should be present in the deletedKeyTable.
-    Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-        ozoneKey));
+    Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(deletedKey));
   }
 
 
   @Test
   public void testAddToDBBatchWithErrorResponse() throws Exception {
-    OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
-        bucketName, keyName, replicationType, replicationFactor);
-
-    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setCreationTime(Time.now()).build();
+    omBucketInfo = OmBucketInfo.newBuilder()
+            .setVolumeName(volumeName).setBucketName(bucketName)
+            .setCreationTime(Time.now()).build();
+    OmKeyInfo omKeyInfo = getOmKeyInfo();
 
     OzoneManagerProtocolProtos.OMResponse omResponse =
         OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse(
@@ -153,14 +149,10 @@
             .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
             .build();
 
-    OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse(
-        omResponse, omKeyInfo, true, omBucketInfo);
+    OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo,
+            omResponse);
 
-    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
-        keyName);
-
-    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
-        clientID, replicationType, replicationFactor, omMetadataManager);
+    String ozoneKey = addKeyToTable();
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
@@ -174,4 +166,22 @@
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
 
   }
+
+  protected String addKeyToTable() throws Exception {
+    String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+            keyName);
+
+    TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
+            clientID, replicationType, replicationFactor, omMetadataManager);
+    return ozoneKey;
+  }
+
+  protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new OMKeyDeleteResponse(omResponse, omKeyInfo, true, omBucketInfo);
+  }
+
+  protected OmBucketInfo getOmBucketInfo() {
+    return omBucketInfo;
+  }
 }
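
The three tests above pin down the delete contract: the key always leaves the key table, but only keys that actually carry blocks are parked in the deleted table for the background block-deleting service, and an error response leaves everything in place. A small Map-based sketch of that contract:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the delete contract: remove from keyTable, and park the
// entry in deletedTable only when it still owns blocks to reclaim.
public class DeleteFlowSketch {
  private final Map<String, List<String>> keyTable = new HashMap<>();
  private final Map<String, List<String>> deletedTable = new HashMap<>();

  void applyDelete(String ozoneKey) {
    List<String> blocks = keyTable.remove(ozoneKey);
    if (blocks != null && !blocks.isEmpty()) {
      deletedTable.put(ozoneKey, blocks); // blocks await cleanup
    }
  }

  public static void main(String[] args) {
    DeleteFlowSketch db = new DeleteFlowSketch();
    db.keyTable.put("emptyKey", Collections.emptyList());
    db.keyTable.put("keyWithBlocks", Arrays.asList("blk-1", "blk-2"));
    db.applyDelete("emptyKey");
    db.applyDelete("keyWithBlocks");
    assert !db.deletedTable.containsKey("emptyKey");
    assert db.deletedTable.containsKey("keyWithBlocks");
  }
}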
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
new file mode 100644
index 0000000..9a68f96
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyDeleteResponse - prefix layout.
+ */
+public class TestOMKeyDeleteResponseWithFSO extends TestOMKeyDeleteResponse {
+
+  @Override
+  protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new OMKeyDeleteResponseWithFSO(omResponse, omKeyInfo.getKeyName(),
+        omKeyInfo, true, getOmBucketInfo(), false);
+  }
+
+  @Override
+  protected String addKeyToTable() throws Exception {
+    // Add volume, bucket and key entries to OM DB.
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    // Create parent dirs for the path
+    long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+            bucketName, "", omMetadataManager);
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentId + 1,
+                    parentId, 100, Time.now());
+    TestOMRequestUtils.addFileToKeyTable(false, false,
+            keyName, omKeyInfo, -1, 50, omMetadataManager);
+    return omKeyInfo.getPath();
+  }
+
+  @Override
+  protected OmKeyInfo getOmKeyInfo() {
+    Assert.assertNotNull(getOmBucketInfo());
+    return TestOMRequestUtils.createOmKeyInfo(volumeName,
+            getOmBucketInfo().getBucketName(), keyName, replicationType,
+            replicationFactor,
+            getOmBucketInfo().getObjectID() + 1,
+            getOmBucketInfo().getObjectID(), 100, Time.now());
+  }
+
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    // The metadata layout prefix is normally set when OzoneManager#start()
+    // is invoked, which does not happen in this test. Hence the flag is
+    // set explicitly here to populate the prefix tables.
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index 312fcaf..df7fd01 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -21,6 +21,10 @@
 import java.util.Random;
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.jetbrains.annotations.NotNull;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -50,12 +54,14 @@
   protected String keyName;
   protected HddsProtos.ReplicationFactor replicationFactor;
   protected HddsProtos.ReplicationType replicationType;
+  protected OmBucketInfo omBucketInfo;
   protected long clientID;
   protected Random random;
+  protected long txnLogId = 100000L;
 
   @Before
   public void setup() throws Exception {
-    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
     ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
         folder.newFolder().getAbsolutePath());
     omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -70,6 +76,23 @@
     random = new Random();
   }
 
+  @NotNull
+  protected String getOpenKeyName() {
+    return omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
+            clientID);
+  }
+
+  @NotNull
+  protected OmKeyInfo getOmKeyInfo() {
+    return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            replicationType, replicationFactor);
+  }
+
+  @NotNull
+  protected OzoneConfiguration getOzoneConfiguration() {
+    return new OzoneConfiguration();
+  }
+
   @After
   public void stop() {
     Mockito.framework().clearInlineMocks();
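
setup() now fetches its configuration through the overridable getOzoneConfiguration() hook, which the *WithFSO subclasses use to flip the prefix-layout switch before the metadata manager and its tables are created. A minimal sketch of that seam; the property name comes from ozone-default.xml, the rest is illustrative:

import java.util.HashMap;
import java.util.Map;

// Sketch of the configuration seam: setup() asks a hook for the config,
// so a subclass can enable the prefix layout before tables are built.
public class SetupSketch {

  protected Map<String, String> getConfiguration() {
    return new HashMap<>();                 // SIMPLE layout defaults
  }

  public final void setup() {
    Map<String, String> conf = getConfiguration();
    String layout = conf.getOrDefault("ozone.om.metadata.layout", "SIMPLE");
    System.out.println("building metadata manager, layout=" + layout);
  }

  public static void main(String[] args) {
    new SetupSketch().setup();              // layout=SIMPLE
    new SetupSketch() {
      @Override
      protected Map<String, String> getConfiguration() {
        Map<String, String> conf = super.getConfiguration();
        conf.put("ozone.om.metadata.layout", "PREFIX");
        return conf;
      }
    }.setup();                              // layout=PREFIX
  }
}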
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
index 4996bd0..03065ab 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java
@@ -31,7 +31,7 @@
     extends TestS3MultipartResponse {
 
   @Test
-  public void addDBToBatch() throws Exception {
+  public void testAddDBToBatch() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String keyName = UUID.randomUUID().toString();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java
new file mode 100644
index 0000000..6ef79d4
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponseWithFSO.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Tests S3 Initiate Multipart Upload response - prefix layout.
+ */
+public class TestS3InitiateMultipartUploadResponseWithFSO
+    extends TestS3InitiateMultipartUploadResponse {
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String prefix = "a/b/c/d/";
+    String fileName = UUID.randomUUID().toString();
+    String keyName = prefix + fileName;
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    long parentID = 1027; // assume objectID of dir path "a/b/c/d" is 1027
+    List<OmDirectoryInfo> parentDirInfos = new ArrayList<>();
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+            createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, parentDirInfos);
+
+    s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    // Commit the batch manually and verify that addToDBBatch succeeded.
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+
+    String multipartOpenKey = omMetadataManager
+        .getMultipartKey(parentID, fileName, multipartUploadID);
+
+    OmKeyInfo omKeyInfo =
+        omMetadataManager.getOpenKeyTable().get(multipartOpenKey);
+    Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
+    Assert.assertEquals("FileName mismatches!", fileName,
+            omKeyInfo.getKeyName());
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omKeyInfo.getParentObjectID());
+
+    OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager
+            .getMultipartInfoTable().get(multipartKey);
+    Assert.assertNotNull("Failed to find the multipartFileInfo",
+            omMultipartKeyInfo);
+    Assert.assertEquals("ParentId mismatches!", parentID,
+            omMultipartKeyInfo.getParentID());
+
+    Assert.assertEquals("Upload Id mismatches!", multipartUploadID,
+            omMultipartKeyInfo.getUploadID());
+  }
+}
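
The test distinguishes two DB keys for one FSO multipart upload: the multipartKey (volume/bucket/key/uploadId) that indexes the multipartInfoTable, and the multipartOpenKey (parentObjectId/fileName/uploadId) that indexes the open-key table. A rough contrast, with delimiters assumed rather than taken from OmMetadataManagerImpl:

// Rough shapes of the two multipart keys used above (delimiters assumed).
public final class MultipartKeyShapes {

  // multipartInfoTable key: addressed by the full object path.
  static String multipartKey(String vol, String bucket, String key,
      String uploadId) {
    return "/" + vol + "/" + bucket + "/" + key + "/" + uploadId;
  }

  // openKeyTable key in the prefix layout: addressed by parent + file.
  static String multipartOpenKey(long parentId, String fileName,
      String uploadId) {
    return parentId + "/" + fileName + "/" + uploadId;
  }

  public static void main(String[] args) {
    System.out.println(multipartKey("vol", "bucket", "a/b/c/d/f1", "u-1"));
    System.out.println(multipartOpenKey(1027L, "f1", "u-1"));
  }
}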
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 1231437..cb28316 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -19,8 +19,10 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -31,9 +33,11 @@
 import org.junit.rules.TemporaryFolder;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .KeyInfo;
@@ -113,13 +117,13 @@
                 .setKeyName(keyName)
                 .setMultipartUploadID(multipartUploadID)).build();
 
-    return new S3InitiateMultipartUploadResponse(omResponse, multipartKeyInfo,
-        omKeyInfo);
+    return getS3InitiateMultipartUploadResp(multipartKeyInfo, omKeyInfo,
+        omResponse);
   }
 
   public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
-      String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo,
-      OmBucketInfo omBucketInfo) {
+      String multipartKey, String multipartOpenKey,
+      OmMultipartKeyInfo omMultipartKeyInfo, OmBucketInfo omBucketInfo) {
     OMResponse omResponse = OMResponse.newBuilder()
         .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload)
         .setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -127,11 +131,10 @@
         .setAbortMultiPartUploadResponse(
             MultipartUploadAbortResponse.newBuilder().build()).build();
 
-    return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
-        omMultipartKeyInfo, true, omBucketInfo);
+    return getS3MultipartUploadAbortResp(multipartKey,
+        multipartOpenKey, omMultipartKeyInfo, omBucketInfo, omResponse);
   }
 
-
   public void addPart(int partNumber, PartKeyInfo partKeyInfo,
       OmMultipartKeyInfo omMultipartKeyInfo) {
     omMultipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo);
@@ -153,4 +156,169 @@
             .setType(HddsProtos.ReplicationType.RATIS)
             .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
   }
+
+  public PartKeyInfo createPartKeyInfoFSO(
+      String volumeName, String bucketName, long parentID, String fileName,
+      int partNumber) {
+    return PartKeyInfo.newBuilder()
+        .setPartNumber(partNumber)
+        .setPartName(omMetadataManager.getOzonePathKey(parentID, fileName +
+                UUID.randomUUID().toString()))
+        .setPartKeyInfo(KeyInfo.newBuilder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setDataSize(100L) // Just set dummy size for testing
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setParentID(parentID)
+            .setType(HddsProtos.ReplicationType.RATIS)
+            .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build();
+  }
+
+  public S3InitiateMultipartUploadResponse createS3InitiateMPUResponseFSO(
+      String volumeName, String bucketName, long parentID, String keyName,
+      String multipartUploadID, List<OmDirectoryInfo> parentDirInfos) {
+    OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+            .setUploadID(multipartUploadID)
+            .setCreationTime(Time.now())
+            .setReplicationConfig(new RatisReplicationConfig(
+                    HddsProtos.ReplicationFactor.ONE))
+            .setParentID(parentID)
+            .build();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+
+    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setFileName(fileName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setReplicationConfig(new RatisReplicationConfig(
+                    HddsProtos.ReplicationFactor.ONE))
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .setParentObjectID(parentID)
+            .build();
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setSuccess(true).setInitiateMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartInfoInitiateResponse
+                            .newBuilder().setVolumeName(volumeName)
+                            .setBucketName(bucketName)
+                            .setKeyName(keyName)
+                            .setMultipartUploadID(multipartUploadID)).build();
+
+    String mpuKey = omMetadataManager.getMultipartKey(
+        omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
+        keyName, multipartUploadID);
+
+    return new S3InitiateMultipartUploadResponseWithFSO(omResponse,
+        multipartKeyInfo, omKeyInfo, mpuKey, parentDirInfos);
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO(
+          String volumeName, String bucketName, long parentID, String keyName,
+          String multipartUploadID,
+          OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
+          OmMultipartKeyInfo multipartKeyInfo,
+          OzoneManagerProtocolProtos.Status status, String openKey)
+          throws IOException {
+    if (multipartKeyInfo == null) {
+      multipartKeyInfo = new OmMultipartKeyInfo.Builder()
+              .setUploadID(multipartUploadID)
+              .setCreationTime(Time.now())
+              .setReplicationConfig(new RatisReplicationConfig(
+                      HddsProtos.ReplicationFactor.ONE))
+              .setParentID(parentID)
+              .build();
+    }
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+
+    boolean isRatisEnabled = true;
+    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+    OmBucketInfo omBucketInfo =
+            omMetadataManager.getBucketTable().get(bucketKey);
+
+    OmKeyInfo openPartKeyInfoToBeDeleted = new OmKeyInfo.Builder()
+            .setVolumeName(volumeName)
+            .setBucketName(bucketName)
+            .setKeyName(fileName)
+            .setFileName(fileName)
+            .setCreationTime(Time.now())
+            .setModificationTime(Time.now())
+            .setReplicationConfig(new RatisReplicationConfig(
+                    HddsProtos.ReplicationFactor.ONE))
+            .setOmKeyLocationInfos(Collections.singletonList(
+                    new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+            .build();
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
+            .setStatus(status).setSuccess(true)
+            .setCommitMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse
+                            .newBuilder().setPartName(volumeName)).build();
+
+    return new S3MultipartUploadCommitPartResponseWithFSO(omResponse,
+        multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo,
+        openPartKeyInfoToBeDeleted, isRatisEnabled, omBucketInfo);
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO(
+          String volumeName, String bucketName, long parentID, String keyName,
+          String multipartUploadID, OmKeyInfo omKeyInfo,
+          OzoneManagerProtocolProtos.Status status,
+          List<OmKeyInfo> unUsedParts) {
+
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+
+    String multipartOpenKey = getMultipartKey(parentID, keyName,
+        multipartUploadID);
+
+    OMResponse omResponse = OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload)
+            .setStatus(status).setSuccess(true)
+            .setCompleteMultiPartUploadResponse(
+                    OzoneManagerProtocolProtos.MultipartUploadCompleteResponse
+                            .newBuilder().setBucket(bucketName)
+                            .setVolume(volumeName).setKey(keyName)).build();
+
+    return new S3MultipartUploadCompleteResponseWithFSO(omResponse,
+        multipartKey, multipartOpenKey, omKeyInfo, unUsedParts);
+  }
+
+  private String getMultipartKey(long parentID, String keyName,
+                                 String multipartUploadID) {
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+  }
+
+  protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp(
+      OmMultipartKeyInfo multipartKeyInfo, OmKeyInfo omKeyInfo,
+      OMResponse omResponse) {
+    return new S3InitiateMultipartUploadResponse(omResponse, multipartKeyInfo,
+        omKeyInfo);
+  }
+
+  protected S3MultipartUploadAbortResponse getS3MultipartUploadAbortResp(
+      String multipartKey, String multipartOpenKey,
+      OmMultipartKeyInfo omMultipartKeyInfo, OmBucketInfo omBucketInfo,
+      OMResponse omResponse) {
+    return new S3MultipartUploadAbortResponse(omResponse, multipartKey,
+        multipartOpenKey, omMultipartKeyInfo, true, omBucketInfo);
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index a11c4db..a568f90 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -43,8 +43,11 @@
 
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
     String multipartUploadID = UUID.randomUUID().toString();
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
+
     String multipartKey = omMetadataManager.getMultipartKey(volumeName,
         bucketName, keyName, multipartUploadID);
 
@@ -59,7 +62,7 @@
         batchOperation);
 
     S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
-        createS3AbortMPUResponse(multipartKey,
+        createS3AbortMPUResponse(multipartKey, multipartOpenKey,
             s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
             omBucketInfo);
 
@@ -82,8 +85,10 @@
 
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
-    String keyName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
     String multipartUploadID = UUID.randomUUID().toString();
+    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName,
+        keyName, multipartUploadID);
     String multipartKey = omMetadataManager.getMultipartKey(volumeName,
         bucketName, keyName, multipartUploadID);
 
@@ -116,7 +121,7 @@
 
 
     S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
-        createS3AbortMPUResponse(multipartKey,
+        createS3AbortMPUResponse(multipartKey, multipartOpenKey,
             s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(),
             omBucketInfo);
 
@@ -154,4 +159,14 @@
         ro.getOmKeyInfoList().get(0));
   }
 
+  protected String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    return omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+  }
+
 }
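
Aborting an upload removes the multipart bookkeeping and moves every already-committed part into the deleted table so its blocks can be reclaimed, which is what testAddDBToBatchWithParts checks part by part. A compact in-memory sketch of the abort flow:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the abort flow: drop the multipart row and queue each
// committed part in deletedTable for block reclamation.
public class AbortFlowSketch {
  private final Map<String, List<String>> multipartInfoTable = new HashMap<>();
  private final Map<String, List<String>> deletedTable = new HashMap<>();

  void applyAbort(String multipartKey) {
    List<String> parts = multipartInfoTable.remove(multipartKey);
    if (parts != null && !parts.isEmpty()) {
      deletedTable.put(multipartKey, parts);  // parts await block deletion
    }
  }

  public static void main(String[] args) {
    AbortFlowSketch db = new AbortFlowSketch();
    db.multipartInfoTable.put("/vol/bucket/key/u-1",
        new ArrayList<>(Arrays.asList("part-1", "part-2")));
    db.applyAbort("/vol/bucket/key/u-1");
    assert db.deletedTable.get("/vol/bucket/key/u-1").size() == 2;
  }
}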
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java
new file mode 100644
index 0000000..41a089e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponseWithFSO.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests multipart upload abort response - prefix layout.
+ */
+public class TestS3MultipartUploadAbortResponseWithFSO
+    extends TestS3MultipartUploadAbortResponse {
+
+  private String dirName = "abort/b/c/";
+
+  private long parentID = 1027;
+
+  @Override
+  protected String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  @Override
+  protected String getMultipartOpenKey(String volumeName, String bucketName,
+      String keyName, String multipartUploadID) {
+    String fileName = StringUtils.substringAfter(keyName, dirName);
+    return omMetadataManager.getMultipartKey(parentID, fileName,
+        multipartUploadID);
+  }
+
+  @Override
+  protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp(
+      OmMultipartKeyInfo multipartKeyInfo, OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+
+    String mpuDBKey =
+        omMetadataManager.getMultipartKey(omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), omKeyInfo.getKeyName(),
+        multipartKeyInfo.getUploadID());
+
+    return new S3InitiateMultipartUploadResponseWithFSO(omResponse,
+        multipartKeyInfo, omKeyInfo, mpuDBKey, new ArrayList<>());
+  }
+
+  @Override
+  protected S3MultipartUploadAbortResponse getS3MultipartUploadAbortResp(
+      String multipartKey, String multipartOpenKey,
+      OmMultipartKeyInfo omMultipartKeyInfo, OmBucketInfo omBucketInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse) {
+    return new S3MultipartUploadAbortResponseWithFSO(omResponse, multipartKey,
+        multipartOpenKey, omMultipartKeyInfo, true, omBucketInfo);
+  }
+
+  @Override
+  public OzoneManagerProtocolProtos.PartKeyInfo createPartKeyInfo(
+      String volumeName, String bucketName, String keyName, int partNumber) {
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    return createPartKeyInfoFSO(volumeName, bucketName, parentID, fileName,
+        partNumber);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java
new file mode 100644
index 0000000..17a8e60
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests multipart upload commit part response - prefix layout.
+ */
+public class TestS3MultipartUploadCommitPartResponseWithFSO
+    extends TestS3MultipartResponse {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    createParentPath(volumeName, bucketName);
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+        createS3CommitMPUResponseFSO(volumeName, bucketName, parentID, keyName,
+            multipartUploadID, null, null,
+                OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
+    Assert.assertNotNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    // As no parts were created, the delete table should stay empty.
+    Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  @Test
+  public void testAddDBToBatchWithParts() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+            createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    // Add a dummy part for testing.
+    // No key locations are added, as this test only verifies whether
+    // entries are added to the delete table.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseFSO.getOmMultipartKeyInfo();
+
+    PartKeyInfo part1 = createPartKeyInfoFSO(volumeName, bucketName, parentID,
+        fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo,
+                    OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // As one part was created, one entry should be in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+        omMetadataManager.getDeletedTable()));
+
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+        part1DeletedKeyName));
+
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
+        ro.getOmKeyInfoList().get(0));
+  }
+
+  @Test
+  public void testWithMultipartUploadError() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName,
+            multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+            createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    // Add a dummy part for testing.
+    // No key locations are added, as this test only verifies whether
+    // entries are added to the delete table.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseFSO.getOmMultipartKeyInfo();
+
+    PartKeyInfo part1 = createPartKeyInfoFSO(volumeName, bucketName, parentID,
+            fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName + "invalid", multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo, OzoneManagerProtocolProtos.Status
+                            .NO_SUCH_MULTIPART_UPLOAD_ERROR, openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(openKey));
+    Assert.assertNull(
+            omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // The open key entry should be in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+            openKey));
+  }
+
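+  /** Builds a key name under the test directory path "a/b/c/". */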
+  private String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  private void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
new file mode 100644
index 0000000..624c2fb
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.multipart;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Test multipart upload complete response with FSO (prefix) layout.
+ */
+public class TestS3MultipartUploadCompleteResponseWithFSO
+    extends TestS3MultipartResponse {
+
+  private String dirName = "a/b/c/";
+
+  private long parentID;
+
+  @Test
+  public void testAddDBToBatch() throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+
+    long txnId = 50;
+    long objectId = parentID + 1;
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName,
+            bucketName, keyName, multipartUploadID);
+    String dbMultipartOpenKey = omMetadataManager.getMultipartKey(parentID,
+            fileName, multipartUploadID);
+    long clientId = Time.now();
+
+    // add MPU entry to OpenFileTable
+    List<OmDirectoryInfo> parentDirInfos = new ArrayList<>();
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+        createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
+            keyName, multipartUploadID, parentDirInfos);
+
+    s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    String dbOpenKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+    String dbKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+    OmKeyInfo omKeyInfoFSO =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId,
+                    Time.now());
+
+    // add key to openFileTable
+    omKeyInfoFSO.setKeyName(fileName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+            fileName, omKeyInfoFSO, clientId, omKeyInfoFSO.getObjectID(),
+            omMetadataManager);
+
+    addS3MultipartUploadCommitPartResponseFSO(volumeName, bucketName, keyName,
+            multipartUploadID, dbOpenKey);
+
+    Assert.assertNotNull(
+        omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNotNull(
+        omMetadataManager.getOpenKeyTable().get(dbMultipartOpenKey));
+
+    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
+            createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, omKeyInfoFSO,
+                OzoneManagerProtocolProtos.Status.OK, unUsedParts);
+
+    s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager,
+        batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dbKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNull(
+            omMetadataManager.getOpenKeyTable().get(dbMultipartOpenKey));
+
+    // As no parts were created, there should be no entries in the
+    // delete table.
+    Assert.assertEquals(0, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
+  @Test
+  public void testAddDBToBatchWithParts() throws Exception {
+
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = getKeyName();
+
+    TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+            omMetadataManager);
+    createParentPath(volumeName, bucketName);
+
+    String multipartUploadID = UUID.randomUUID().toString();
+
+    int deleteEntryCount = 0;
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName,
+        bucketName, keyName, multipartUploadID);
+    String dbMultipartOpenKey = omMetadataManager.getMultipartKey(parentID,
+        fileName, multipartUploadID);
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+            addS3InitiateMultipartUpload(volumeName, bucketName, keyName,
+                    multipartUploadID);
+
+    // Add a dummy part for testing.
+    // No key locations are added, as this test only verifies whether
+    // entries are added to the delete table.
+    OmMultipartKeyInfo omMultipartKeyInfo =
+            s3InitiateMultipartUploadResponseFSO.getOmMultipartKeyInfo();
+
+    OmKeyInfo omKeyInfoFSO = commitS3MultipartUpload(volumeName, bucketName,
+            keyName, multipartUploadID, fileName, dbMultipartKey,
+            omMultipartKeyInfo);
+    // After the commit, an entry is added to the deleted table.
+    deleteEntryCount++;
+
+    OmKeyInfo omKeyInfo =
+            TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+                    HddsProtos.ReplicationType.RATIS,
+                    HddsProtos.ReplicationFactor.ONE,
+                    parentID + 10,
+                    parentID, 100, Time.now());
+    List<OmKeyInfo> unUsedParts = new ArrayList<>();
+    unUsedParts.add(omKeyInfo);
+    S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
+            createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, omKeyInfoFSO,
+                    OzoneManagerProtocolProtos.Status.OK, unUsedParts);
+
+    s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    String dbKey = omMetadataManager.getOzonePathKey(parentID,
+          omKeyInfoFSO.getFileName());
+    Assert.assertNotNull(omMetadataManager.getKeyTable().get(dbKey));
+    Assert.assertNull(
+            omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
+    Assert.assertNull(
+            omMetadataManager.getOpenKeyTable().get(dbMultipartOpenKey));
+
+    // As one unused part exists, one more entry should be in the delete
+    // table.
+    deleteEntryCount++;
+    Assert.assertEquals(deleteEntryCount, omMetadataManager.countRowsInTable(
+            omMetadataManager.getDeletedTable()));
+  }
+
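+  /**
+   * Commits part 1 of the given MPU and verifies the part's key info is
+   * tracked in the deleted table; returns that part's OmKeyInfo.
+   */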
+  private OmKeyInfo commitS3MultipartUpload(String volumeName,
+      String bucketName, String keyName, String multipartUploadID,
+      String fileName, String multipartKey,
+      OmMultipartKeyInfo omMultipartKeyInfo) throws IOException {
+
+    PartKeyInfo part1 = createPartKeyInfoFSO(volumeName, bucketName, parentID,
+        fileName, 1);
+
+    addPart(1, part1, omMultipartKeyInfo);
+
+    long clientId = Time.now();
+    String openKey = omMetadataManager.getOpenFileName(parentID, fileName,
+            clientId);
+
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID,
+                    omMultipartKeyInfo.getPartKeyInfo(1),
+                    omMultipartKeyInfo,
+                    OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager,
+            batchOperation);
+
+    Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
+    Assert.assertNull(
+        omMetadataManager.getMultipartInfoTable().get(multipartKey));
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // As one part was created, one entry should be in the delete table.
+    Assert.assertEquals(1, omMetadataManager.countRowsInTable(
+        omMetadataManager.getDeletedTable()));
+
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
+
+    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
+        part1DeletedKeyName));
+
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
+    OmKeyInfo omPartKeyInfo = OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo());
+    Assert.assertEquals(omPartKeyInfo, ro.getOmKeyInfoList().get(0));
+
+    return omPartKeyInfo;
+  }
+
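+  /** Initiates an MPU and writes the resulting entries to the DB batch. */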
+  private S3InitiateMultipartUploadResponse addS3InitiateMultipartUpload(
+          String volumeName, String bucketName, String keyName,
+          String multipartUploadID) throws IOException {
+
+    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
+            createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, new ArrayList<>());
+
+    s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    return s3InitiateMultipartUploadResponseFSO;
+  }
+
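+  /** Builds a key name under the test directory path "a/b/c/". */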
+  private String getKeyName() {
+    return dirName + UUID.randomUUID().toString();
+  }
+
+  private void createParentPath(String volumeName, String bucketName)
+      throws Exception {
+    // Create parent dirs for the path
+    parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+            dirName, omMetadataManager);
+  }
+
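+  /** Applies a part-commit response without part info and commits it. */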
+  private void addS3MultipartUploadCommitPartResponseFSO(String volumeName,
+      String bucketName, String keyName, String multipartUploadID,
+      String openKey) throws IOException {
+    S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
+            createS3CommitMPUResponseFSO(volumeName, bucketName, parentID,
+                    keyName, multipartUploadID, null, null,
+                    OzoneManagerProtocolProtos.Status.OK, openKey);
+
+    s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager,
+            batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObj.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObj.java
new file mode 100644
index 0000000..b28a52a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObj.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.security.acl;
+
+import org.apache.hadoop.ozone.om.KeyManager;
+import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Unit tests for OzoneObjInfo.
+ */
+public class TestOzoneObj {
+
+  private OzoneObjInfo objInfo;
+  private OzoneObjInfo.Builder builder;
+  private String volume = "vol1";
+  private String bucket = "bucket1";
+  private String key = "key1";
+  private static final OzoneObj.StoreType STORE = OzoneObj.StoreType.OZONE;
+
+  @Test
+  public void testGetPathViewer() throws IOException {
+
+    builder = getBuilder(volume, bucket, key);
+    objInfo = builder.build();
+    assertEquals(volume, objInfo.getVolumeName());
+    assertNotNull("missing path accessor",
+        objInfo.getOzonePrefixPathViewer());
+
+    objInfo = getBuilder(null, null, null).build();
+    assertNull(objInfo.getVolumeName());
+    assertNotNull("missing path accessor",
+        objInfo.getOzonePrefixPathViewer());
+
+    objInfo = getBuilder(volume, null, null).build();
+    assertEquals(volume, objInfo.getVolumeName());
+    assertNotNull("missing path accessor",
+        objInfo.getOzonePrefixPathViewer());
+
+  }
+
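+  /**
+   * Builds an OzoneObjInfo whose prefix path viewer wraps a mocked
+   * KeyManager.
+   */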
+  private OzoneObjInfo.Builder getBuilder(String withVolume,
+      String withBucket, String withKey) throws IOException {
+
+    KeyManager mockKeyManager = mock(KeyManager.class);
+    OzonePrefixPath prefixPathViewer = new OzonePrefixPathImpl("vol1",
+        "buck1", "file", mockKeyManager);
+
+    return OzoneObjInfo.Builder.newBuilder()
+        .setResType(OzoneObj.ResourceType.VOLUME)
+        .setStoreType(STORE)
+        .setVolumeName(withVolume)
+        .setBucketName(withBucket)
+        .setKeyName(withKey)
+        .setOzonePrefixPath(prefixPathViewer);
+  }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java
index b8b0363..5e76e09 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestRequestContext.java
@@ -20,13 +20,15 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.IOException;
+
 /**
  * Test request context.
  */
 public class TestRequestContext {
 
   @Test
-  public void testRecursiveAccessFlag() {
+  public void testRecursiveAccessFlag() throws IOException {
     RequestContext context = getUserRequestContext("om",
             IAccessAuthorizer.ACLType.CREATE, false, "volume1",
             true);
@@ -78,7 +80,8 @@
 
   private RequestContext getUserRequestContext(String username,
       IAccessAuthorizer.ACLType type, boolean isOwner, String ownerName,
-      boolean recursiveAccessCheck) {
+      boolean recursiveAccessCheck) throws IOException {
+
     return RequestContext.getBuilder(
             UserGroupInformation.createRemoteUser(username), null, null,
             type, ownerName, recursiveAccessCheck).build();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index fa5980c..3d2ef03bb 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -52,6 +53,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
@@ -250,6 +252,7 @@
     return true;
   }
 
   /**
    * Helper method to delete an object specified by key name in bucket.
    *
@@ -257,12 +260,32 @@
    * @return true if the key is deleted, false otherwise
    */
   @Override
-  public boolean deleteObject(String keyName) {
+  public boolean deleteObject(String keyName) throws IOException {
+    return deleteObject(keyName, false);
+  }
+
+  /**
+   * Helper method to delete an object specified by key name in bucket.
+   *
+   * @param keyName key name to be deleted
+   * @param recursive recursive deletion of all sub path keys if true,
+   *                  otherwise non-recursive
+   * @return true if the key is deleted, false otherwise
+   */
+  @Override
+  public boolean deleteObject(String keyName, boolean recursive)
+      throws IOException {
     LOG.trace("issuing delete for key {}", keyName);
     try {
       incrementCounter(Statistic.OBJECTS_DELETED, 1);
-      bucket.deleteKey(keyName);
+      bucket.deleteDirectory(keyName, recursive);
       return true;
+    } catch (OMException ome) {
+      LOG.error("delete key failed: {}", ome.getMessage());
+      if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
+        throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+      }
+      return false;
     } catch (IOException ioe) {
       LOG.error("delete key failed {}", ioe.getMessage());
       return false;
@@ -520,4 +543,8 @@
     return blockLocations;
   }
 
+  @Override
+  public boolean isFSOptimizedBucket() {
+    return OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata());
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index e5695df..9a9fb51 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -314,6 +314,7 @@
 
     String srcPath = src.toUri().getPath();
     String dstPath = dst.toUri().getPath();
+    // TODO: Discuss whether we need to throw an exception here.
     if (srcPath.equals(dstPath)) {
       return true;
     }
@@ -325,6 +326,10 @@
       return false;
     }
 
+    if (adapter.isFSOptimizedBucket()) {
+      return renameFSO(srcPath, dstPath);
+    }
+
     // Check if the source exists
     FileStatus srcStatus;
     try {
@@ -405,6 +410,24 @@
     return result;
   }
 
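+  /**
+   * Renames via a single OM call; the FSO (prefix) layout lets OM rename
+   * a directory atomically. Returns false on expected rename failure
+   * codes instead of throwing.
+   */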
+  private boolean renameFSO(String srcPath, String dstPath)
+      throws IOException {
+    try {
+      adapter.renameKey(srcPath, dstPath);
+    } catch (OMException ome) {
+      LOG.error("rename key failed: {}. Error code: {}, source: {}, "
+              + "destination: {}", ome.getMessage(), ome.getResult(),
+              srcPath, dstPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult() ||
+          OMException.ResultCodes.KEY_RENAME_ERROR == ome.getResult() ||
+          OMException.ResultCodes.KEY_NOT_FOUND == ome.getResult()) {
+        return false;
+      } else {
+        throw ome;
+      }
+    }
+    return true;
+  }
+
   /**
    * Intercept rename to trash calls from TrashPolicyDefault.
    */
@@ -485,6 +508,20 @@
     incrementCounter(Statistic.INVOCATION_DELETE, 1);
     statistics.incrementWriteOps(1);
     LOG.debug("Delete path {} - recursive {}", f, recursive);
+
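+    // For FSO (prefix layout) buckets, delegate the delete to OM, which
+    // can remove a directory sub-tree atomically on the server side.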
+    if (adapter.isFSOptimizedBucket()) {
+      if (f.isRoot()) {
+        if (!recursive && listStatus(f).length != 0) {
+          throw new PathIsNotEmptyDirectoryException(f.toString());
+        }
+        LOG.warn("Cannot delete root directory.");
+        return false;
+      }
+
+      String key = pathToKey(f);
+      return adapter.deleteObject(key, recursive);
+    }
+
     FileStatus status;
     try {
       status = getFileStatus(f);
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index dbfa3a5..f25dd45 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -431,10 +432,13 @@
    * Helper method to delete an object specified by key name in bucket.
    *
    * @param path path to a key to be deleted
+   * @param recursive recursive deletion of all sub path keys if true,
+   *                  otherwise non-recursive
    * @return true if the key is deleted, false otherwise
    */
   @Override
-  public boolean deleteObject(String path) {
+  public boolean deleteObject(String path, boolean recursive)
+      throws IOException {
     LOG.trace("issuing delete for path to key: {}", path);
     incrementCounter(Statistic.OBJECTS_DELETED, 1);
     OFSPath ofsPath = new OFSPath(path);
@@ -444,14 +448,25 @@
     }
     try {
       OzoneBucket bucket = getBucket(ofsPath, false);
-      bucket.deleteKey(keyName);
+      bucket.deleteDirectory(keyName, recursive);
       return true;
+    } catch (OMException ome) {
+      LOG.error("delete key failed: {}", ome.getMessage());
+      if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) {
+        throw new PathIsNotEmptyDirectoryException(ome.getMessage());
+      }
+      return false;
     } catch (IOException ioe) {
       LOG.error("delete key failed " + ioe.getMessage());
       return false;
     }
   }
 
+  @Override
+  public boolean deleteObject(String path) throws IOException {
+    return deleteObject(path, false);
+  }
+
   /**
    * Helper function to check if the list of key paths are in the same volume
    * and same bucket.
@@ -1026,4 +1041,10 @@
         null, null, null, new BlockLocation[0]
     );
   }
+
+  @Override
+  public boolean isFSOptimizedBucket() {
+    // TODO: Need to refine this part.
+    return false;
+  }
 }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 55edd00..c6c6acd 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -308,6 +308,10 @@
     if (!ofsSrc.isInSameBucketAs(ofsDst)) {
       throw new IOException("Cannot rename a key to a different bucket");
     }
+    OzoneBucket bucket = adapterImpl.getBucket(ofsSrc, false);
+    if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+      return renameFSO(bucket, ofsSrc, ofsDst);
+    }
 
     // Cannot rename a directory to its own subdirectory
     Path dstParent = dst.getParent();
@@ -385,6 +389,29 @@
     return result;
   }
 
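+  /**
+   * Renames within an FSO (prefix layout) bucket via a single OM call.
+   * Returns false on expected rename failure codes instead of throwing.
+   */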
+  private boolean renameFSO(OzoneBucket bucket,
+      OFSPath srcPath, OFSPath dstPath) throws IOException {
+    // construct src and dst key paths
+    String srcKeyPath = srcPath.getNonKeyPathNoPrefixDelim() +
+        OZONE_URI_DELIMITER + srcPath.getKeyName();
+    String dstKeyPath = dstPath.getNonKeyPathNoPrefixDelim() +
+        OZONE_URI_DELIMITER + dstPath.getKeyName();
+    try {
+      adapterImpl.rename(bucket, srcKeyPath, dstKeyPath);
+    } catch (OMException ome) {
+      LOG.error("rename key failed: {}. source: {}, destination: {}",
+          ome.getMessage(), srcKeyPath, dstKeyPath);
+      if (OMException.ResultCodes.KEY_ALREADY_EXISTS == ome.getResult() ||
+          OMException.ResultCodes.KEY_RENAME_ERROR == ome.getResult() ||
+          OMException.ResultCodes.KEY_NOT_FOUND == ome.getResult()) {
+        return false;
+      } else {
+        throw ome;
+      }
+    }
+    return true;
+  }
+
   /**
    * Intercept rename to trash calls from TrashPolicyDefault.
    */
@@ -501,6 +528,16 @@
         return false;
       }
 
+      if (!ofsPath.isVolume() && !ofsPath.isBucket()) {
+        OzoneBucket bucket = adapterImpl.getBucket(ofsPath, false);
+        if (OzoneFSUtils.isFSOptimizedBucket(bucket.getMetadata())) {
+          String ofsKeyPath = ofsPath.getNonKeyPathNoPrefixDelim() +
+              OZONE_URI_DELIMITER + ofsPath.getKeyName();
+          return adapterImpl.deleteObject(ofsKeyPath, recursive);
+        }
+      }
+
       // Handle delete volume
       if (ofsPath.isVolume()) {
         String volumeName = ofsPath.getVolumeName();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index b9e2881..0258f69 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -51,7 +51,9 @@
 
   boolean createDirectory(String keyName) throws IOException;
 
-  boolean deleteObject(String keyName);
+  boolean deleteObject(String keyName) throws IOException;
+
+  boolean deleteObject(String keyName, boolean recursive) throws IOException;
 
   boolean deleteObjects(List<String> keyName);
 
@@ -75,4 +77,5 @@
   FileStatusAdapter getFileStatus(String key, URI uri,
       Path qualifiedPath, String userName) throws IOException;
 
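+  /** Returns true if the bucket uses the FS-optimized (prefix) layout. */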
+  boolean isFSOptimizedBucket();
 }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 623ddad..f44afe4 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -438,6 +438,11 @@
       } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
         //NOT_FOUND is not a problem, AWS doesn't throw exception for missing
         // keys. Just return 204
+      } else if (ex.getResult() == ResultCodes.DIRECTORY_NOT_EMPTY) {
+        // With the PREFIX metadata layout, deleting a non-empty directory
+        // without the recursive flag throws DIRECTORY_NOT_EMPTY. As with
+        // NOT_FOUND, AWS doesn't treat this as an error, so just
+        // return 204.
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
         throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
       } else {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
new file mode 100644
index 0000000..c55a1cd
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.WithParentObjectId;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+import picocli.CommandLine.Model.CommandSpec;
+import picocli.CommandLine.Spec;
+
+/**
+ * Tool that parses OM db file for prefix table.
+ */
+@CommandLine.Command(
+    name = "prefix",
+    description = "Parse prefix contents")
+@MetaInfServices(SubcommandWithParent.class)
+public class PrefixParser implements Callable<Void>, SubcommandWithParent {
+
+  /**
+   * Types to represent the level or path component type.
+   */
+  public enum Types {
+    VOLUME,
+    BUCKET,
+    FILE,
+    DIRECTORY,
+    INTERMEDIATE_DIRECTORY,
+    NON_EXISTENT_DIRECTORY,
+  }
+
+  private final int[] parserStats = new int[Types.values().length];
+
+  @Spec
+  private CommandSpec spec;
+
+  @CommandLine.Option(names = {"--db"},
+      required = true,
+      description = "Database File Path")
+  private String dbPath;
+
+  @CommandLine.Option(names = {"--path"},
+      required = true,
+      description = "prefixFile Path")
+  private String filePath;
+
+  @CommandLine.Option(names = {"--bucket"},
+      required = true,
+      description = "bucket name")
+  private String bucket;
+
+  @CommandLine.Option(names = {"--volume"},
+      required = true,
+      description = "volume name")
+  private String volume;
+
+  public String getDbPath() {
+    return dbPath;
+  }
+
+  public void setDbPath(String dbPath) {
+    this.dbPath = dbPath;
+  }
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  @Override
+  public Void call() throws Exception {
+    parse(volume, bucket, dbPath, filePath);
+    return null;
+  }
+
+  public static void main(String[] args) throws Exception {
+    new PrefixParser().call();
+  }
+
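+  /**
+   * Walks the given path one component at a time, resolving each element
+   * in the directory table by (parent object id, name), then dumps the
+   * directory and file entries under the last resolved directory.
+   */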
+  public void parse(String vol, String buck, String db,
+                    String file) throws Exception {
+    if (!Files.exists(Paths.get(db))) {
+      System.out.println("DB path does not exist: " + db);
+      return;
+    }
+
+    System.out.println("File path is: " + file);
+    System.out.println("DB path is: " + db);
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, db);
+    OzoneManagerRatisUtils.setBucketFSOptimized(true);
+
+    OmMetadataManagerImpl metadataManager =
+        new OmMetadataManagerImpl(conf);
+    metadataManager.start(conf);
+
+    org.apache.hadoop.fs.Path effectivePath =
+        new org.apache.hadoop.fs.Path("/");
+
+    Path p = Paths.get(file);
+
+    String volumeKey = metadataManager.getVolumeKey(vol);
+    if (!metadataManager.getVolumeTable().isExist(volumeKey)) {
+      System.out.println("Invalid volume: " + vol);
+      metadataManager.stop();
+      return;
+    }
+
+    parserStats[Types.VOLUME.ordinal()]++;
+    // First get the info about the bucket
+    String bucketKey = metadataManager.getBucketKey(vol, buck);
+    OmBucketInfo info = metadataManager.getBucketTable().get(bucketKey);
+    if (info == null) {
+      System.out.println("Invalid bucket: " + buck);
+      metadataManager.stop();
+      return;
+    }
+
+    long lastObjectId = info.getObjectID();
+    WithParentObjectId objectBucketId = new WithParentObjectId();
+    objectBucketId.setObjectID(lastObjectId);
+    dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);
+
+    Iterator<Path> pathIterator = p.iterator();
+    while (pathIterator.hasNext()) {
+      Path elem = pathIterator.next();
+      String path =
+          metadataManager.getOzonePathKey(lastObjectId, elem.toString());
+      OmDirectoryInfo directoryInfo =
+          metadataManager.getDirectoryTable().get(path);
+
+      org.apache.hadoop.fs.Path tmpPath =
+          getEffectivePath(effectivePath, elem.toString());
+      if (directoryInfo == null) {
+        System.out.println("Given path contains a non-existent directory at: "
+            + tmpPath);
+        System.out.println("Dumping files and dirs at level: "
+            + tmpPath.getParent());
+        System.out.println();
+        parserStats[Types.NON_EXISTENT_DIRECTORY.ordinal()]++;
+        break;
+      }
+
+      effectivePath = tmpPath;
+
+      dumpInfo(Types.INTERMEDIATE_DIRECTORY, effectivePath,
+          directoryInfo, path);
+      lastObjectId = directoryInfo.getObjectID();
+    }
+
+    // at the last level, now parse both file and dir table
+    dumpTableInfo(Types.DIRECTORY, effectivePath,
+        metadataManager.getDirectoryTable(), lastObjectId);
+
+    dumpTableInfo(Types.FILE, effectivePath,
+        metadataManager.getKeyTable(), lastObjectId);
+    metadataManager.stop();
+  }
+
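+  /** Dumps all entries in the table whose parent is lastObjectId. */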
+  private void dumpTableInfo(Types type,
+      org.apache.hadoop.fs.Path effectivePath,
+      Table<String, ? extends WithParentObjectId> table, long lastObjectId)
+      throws IOException {
+    MetadataKeyFilters.KeyPrefixFilter filter = getPrefixFilter(lastObjectId);
+
+    List<? extends KeyValue
+        <String, ? extends WithParentObjectId>> infoList =
+        table.getRangeKVs(null, 1000, filter);
+
+    for (KeyValue<String, ? extends WithParentObjectId> info : infoList) {
+      Path key = Paths.get(info.getKey());
+      dumpInfo(type, getEffectivePath(effectivePath,
+          key.getName(1).toString()), info.getValue(), info.getKey());
+    }
+  }
+
+  private org.apache.hadoop.fs.Path getEffectivePath(
+      org.apache.hadoop.fs.Path currentPath, String name) {
+    return new org.apache.hadoop.fs.Path(currentPath, name);
+  }
+
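+  /** Prints a single entry and bumps the per-type stats counter. */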
+  private void dumpInfo(Types level, org.apache.hadoop.fs.Path effectivePath,
+                        WithParentObjectId id, String key) {
+    parserStats[level.ordinal()]++;
+    System.out.println("Type: " + level);
+    System.out.println("Path: " + effectivePath);
+    System.out.println("DB Path: " + key);
+    System.out.println("Object Id: " + id.getObjectID());
+    System.out.println("Parent object Id: " + id.getParentObjectID());
+    System.out.println();
+  }
+
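+  /** Builds a filter matching keys prefixed with the given object id. */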
+  private static MetadataKeyFilters.KeyPrefixFilter getPrefixFilter(long id) {
+    return (new MetadataKeyFilters.KeyPrefixFilter())
+        .addFilter(Long.toString(id));
+  }
+
+  public int getParserStats(Types type) {
+    return parserStats[type.ordinal()];
+  }
+}