HDDS-7186. Add support for Rocksdb user info LOG configure. (#3731)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 2ce12ca..6167d71 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.conf.PostConstruct;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 
+import static java.util.concurrent.TimeUnit.MICROSECONDS;
 import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -72,6 +73,17 @@
       Duration.ofMinutes(10).toMillis();
 
   static final boolean CONTAINER_SCHEMA_V3_ENABLED_DEFAULT = false;
+  static final long ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT = 32 * 1024 * 1024;
+  static final int ROCKSDB_LOG_MAX_FILE_NUM_DEFAULT = 64;
+  static final long ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT =
+      6L * 60 * 60 * 1000 * 1000;
+  public static final String ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_KEY =
+      "hdds.datanode.rocksdb.log.max-file-size";
+  public static final String ROCKSDB_LOG_MAX_FILE_NUM_KEY =
+      "hdds.datanode.rocksdb.log.max-file-num";
+  public static final String
+      ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_KEY =
+      "hdds.datanode.rocksdb.delete_obsolete_files_period";
 
   /**
    * Number of threads per volume that Datanode will use for chunk read.
@@ -297,6 +309,45 @@
   )
   private String containerSchemaV3KeySeparator = "|";
 
+  /**
+   * Following RocksDB related configuration applies to Schema V3 only.
+   */
+  @Config(key = "rocksdb.log.level",
+      defaultValue = "INFO",
+      type = ConfigType.STRING,
+      tags = { DATANODE },
+      description =
+          "The user log level of RocksDB(DEBUG/INFO/WARN/ERROR/FATAL)"
+  )
+  private String rocksdbLogLevel = "INFO";
+
+  @Config(key = "rocksdb.log.max-file-size",
+      defaultValue = "32MB",
+      type = ConfigType.SIZE,
+      tags = { DATANODE },
+      description = "The max size of each user log file of RocksDB. " +
+          "0 means no size limit."
+  )
+  private long rocksdbMaxFileSize = ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT;
+
+  @Config(key = "rocksdb.log.max-file-num",
+      defaultValue = "64",
+      type = ConfigType.INT,
+      tags = { DATANODE },
+      description = "The max user log file number to keep for each RocksDB"
+  )
+  private int rocksdbMaxFileNum = ROCKSDB_LOG_MAX_FILE_NUM_DEFAULT;
+
+  @Config(key = "rocksdb.delete_obsolete_files_period",
+      defaultValue = "6h", timeUnit = MICROSECONDS,
+      type = ConfigType.TIME,
+      tags = { DATANODE },
+      description = "Periodicity when obsolete files get deleted. " +
+          "Default is 6h."
+  )
+  private long rocksdbDeleteObsoleteFilesPeriod =
+      ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT;
+
   @PostConstruct
   public void validate() {
     if (containerDeleteThreads < 1) {
@@ -349,6 +400,30 @@
           diskCheckTimeout, DISK_CHECK_TIMEOUT_DEFAULT);
       diskCheckTimeout = DISK_CHECK_TIMEOUT_DEFAULT;
     }
+
+    if (rocksdbMaxFileSize < 0) {
+      LOG.warn(ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_KEY +
+              " must be no less than zero and was set to {}. Defaulting to {}",
+          rocksdbMaxFileSize, ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT);
+      rocksdbMaxFileSize = ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT;
+    }
+
+    if (rocksdbMaxFileNum <= 0) {
+      LOG.warn(ROCKSDB_LOG_MAX_FILE_NUM_KEY +
+              " must be greater than zero and was set to {}. Defaulting to {}",
+          rocksdbMaxFileNum, ROCKSDB_LOG_MAX_FILE_NUM_DEFAULT);
+      rocksdbMaxFileNum = ROCKSDB_LOG_MAX_FILE_NUM_DEFAULT;
+    }
+
+    if (rocksdbDeleteObsoleteFilesPeriod <= 0) {
+      LOG.warn(ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_KEY +
+              " must be greater than zero and was set to {}. Defaulting to {}",
+          rocksdbDeleteObsoleteFilesPeriod,
+          ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT);
+      rocksdbDeleteObsoleteFilesPeriod =
+          ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT;
+    }
+
   }
 
   public void setContainerDeleteThreads(int containerDeleteThreads) {
@@ -455,4 +530,36 @@
   public void setContainerSchemaV3KeySeparator(String separator) {
     this.containerSchemaV3KeySeparator = separator;
   }
+
+  public String getRocksdbLogLevel() {
+    return rocksdbLogLevel;
+  }
+
+  public void setRocksdbLogLevel(String level) {
+    this.rocksdbLogLevel = level;
+  }
+
+  public void setRocksdbMaxFileNum(int count) {
+    this.rocksdbMaxFileNum = count;
+  }
+
+  public int getRocksdbMaxFileNum() {
+    return rocksdbMaxFileNum;
+  }
+
+  public void setRocksdbMaxFileSize(long size) {
+    this.rocksdbMaxFileSize = size;
+  }
+
+  public long getRocksdbMaxFileSize() {
+    return rocksdbMaxFileSize;
+  }
+
+  public long getRocksdbDeleteObsoleteFilesPeriod() {
+    return rocksdbDeleteObsoleteFilesPeriod;
+  }
+
+  public void setRocksdbDeleteObsoleteFilesPeriod(long period) {
+    this.rocksdbDeleteObsoleteFilesPeriod = period;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 79a55ef..0a3e8c5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -36,7 +36,9 @@
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
+import org.rocksdb.InfoLogLevel;
 import org.rocksdb.StatsLevel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -111,6 +113,19 @@
         options.setMaxTotalWalSize(maxWalSize);
       }
 
+      if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) {
+        DatanodeConfiguration dc =
+            config.getObject(DatanodeConfiguration.class);
+        // Config user log files
+        InfoLogLevel level = InfoLogLevel.valueOf(
+            dc.getRocksdbLogLevel() + "_LEVEL");
+        options.setInfoLogLevel(level);
+        options.setMaxLogFileSize(dc.getRocksdbMaxFileSize());
+        options.setKeepLogFileNum(dc.getRocksdbMaxFileNum());
+        options.setDeleteObsoleteFilesPeriodMicros(
+            dc.getRocksdbDeleteObsoleteFilesPeriod());
+      }
+
       String rocksDbStat = config.getTrimmed(
               OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
               OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);