HDDS-4879. Support reserved space for single dir (#1970)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index d2a1330..d2d1112 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -203,6 +203,8 @@
       "hdds.rest.http-address";
   public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880";
   public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
+  public static final String HDDS_DATANODE_DIR_DU_RESERVED =
+      "hdds.datanode.dir.du.reserved";
   public static final String HDDS_REST_CSRF_ENABLED_KEY =
       "hdds.rest.rest-csrf.enabled";
   public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 94c2a4d..051868e 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -145,6 +145,14 @@
     </description>
   </property>
   <property>
+    <name>hdds.datanode.dir.du.reserved</name>
+    <value/>
+    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
+       For example, /dir1:100B,/dir2:200MB means dir1 reserves 100 bytes and dir2 reserves 200 MB.
</description>
+    </description>
+  </property>
+  <property>
     <name>hdds.datanode.volume.choosing.policy</name>
     <value/>
     <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index e0669c7..875d8e9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -20,9 +20,11 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageSize;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
 
@@ -30,6 +32,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
+
 /**
  * Stores information about a disk/volume.
  */
@@ -48,6 +52,8 @@
   // query from the filesystem.
   private long configuredCapacity;
 
+  private long reservedInBytes;
+
   /**
    * Builder for VolumeInfo.
    */
@@ -83,6 +89,31 @@
     }
   }
 
+  private long getReserved(ConfigurationSource conf) {
+    Collection<String> reserveList = conf.getTrimmedStringCollection(
+        HDDS_DATANODE_DIR_DU_RESERVED);
+    for (String reserve : reserveList) {
+      String[] words = reserve.split(":");
+      if (words.length < 2) {
+        LOG.error("Reserved space should config in pair, but current is {}",
+            reserve);
+        continue;
+      }
+
+      if (words[0].trim().equals(rootDir)) {
+        try {
+          StorageSize size = StorageSize.parse(words[1].trim());
+          return (long) size.getUnit().toBytes(size.getValue());
+        } catch (Exception e) {
+          LOG.error("Failed to parse StorageSize:{}", words[1].trim(), e);
+          return 0;
+        }
+      }
+    }
+
+    return 0;
+  }
+
   private VolumeInfo(Builder b) throws IOException {
 
     this.rootDir = b.rootDir;
@@ -108,18 +139,19 @@
     SpaceUsageCheckParams checkParams =
         usageCheckFactory.paramsFor(root);
 
+    this.reservedInBytes = getReserved(b.conf);
     this.usage = new VolumeUsage(checkParams);
   }
 
   public long getCapacity() {
     if (configuredCapacity < 0) {
-      return usage.getCapacity();
+      return Math.max(usage.getCapacity() - reservedInBytes, 0);
     }
     return configuredCapacity;
   }
 
   public long getAvailable() {
-    return usage.getAvailable();
+    return Math.max(usage.getAvailable() - reservedInBytes, 0);
   }
 
   public long getScmUsed() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 57a0e55..65629fd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -25,10 +25,12 @@
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageSize;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
 import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 
@@ -51,6 +53,7 @@
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
+  private static final String RESERVED_SPACE = "100B";
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
@@ -61,6 +64,8 @@
   @Before
   public void setup() throws Exception {
     File rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
+    CONF.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, folder.getRoot() +
+        ":" + RESERVED_SPACE);
     volumeBuilder = new HddsVolume.Builder(folder.getRoot().getPath())
         .datanodeUuid(DATANODE_UUID)
         .conf(CONF)
@@ -146,7 +151,13 @@
 
     // Volume.getAvailable() should succeed even when usage thread
     // is shutdown.
-    assertEquals(spaceUsage.getAvailable(), volume.getAvailable());
+    StorageSize size = StorageSize.parse(RESERVED_SPACE);
+    long reservedSpaceInBytes = (long) size.getUnit().toBytes(size.getValue());
+
+    assertEquals(spaceUsage.getCapacity(),
+        volume.getCapacity() + reservedSpaceInBytes);
+    assertEquals(spaceUsage.getAvailable(),
+        volume.getAvailable() + reservedSpaceInBytes);
   }
 
 }