HDFS-15854. Make some parameters configurable for SlowDiskTracker and SlowPeerTracker (#2718)

Authored-by: tomscut <litao@bigo.sg>
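
The report sizes that were previously hard-coded to 5 entries can now be
tuned. A minimal sketch of overriding the new keys programmatically, as one
might do in a unit test (the class name and the values below are illustrative,
not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class SlowReportConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Include the 10 slowest peers in the slow peer JSON report
        // instead of the default 5.
        conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_KEY, 10);
        // Include the 10 slowest disks in the slow disk JSON report
        // instead of the default 5.
        conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_DISKS_TO_REPORT_KEY, 10);
      }
    }

In a deployment the same overrides would typically be set in hdfs-site.xml on
the NameNode, since SlowPeerTracker and SlowDiskTracker read these values from
the Configuration passed to their constructors.
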
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d04a842..790d1aa 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -676,6 +676,10 @@
       "dfs.datanode.slowpeer.low.threshold.ms";
   public static final long DFS_DATANODE_SLOWPEER_LOW_THRESHOLD_MS_DEFAULT =
       5L;
+  public static final String DFS_DATANODE_MAX_NODES_TO_REPORT_KEY =
+      "dfs.datanode.max.nodes.to.report";
+  public static final int DFS_DATANODE_MAX_NODES_TO_REPORT_DEFAULT =
+      5;
   public static final String DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY =
       "dfs.datanode.min.outlier.detection.disks";
   public static final long DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_DEFAULT =
@@ -684,6 +688,10 @@
       "dfs.datanode.slowdisk.low.threshold.ms";
   public static final long DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_DEFAULT =
       20L;
+  public static final String DFS_DATANODE_MAX_DISKS_TO_REPORT_KEY =
+      "dfs.datanode.max.disks.to.report";
+  public static final int DFS_DATANODE_MAX_DISKS_TO_REPORT_DEFAULT =
+      5;
   public static final String  DFS_DATANODE_HOST_NAME_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_HOST_NAME_KEY;
   public static final String  DFS_NAMENODE_CHECKPOINT_DIR_KEY =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
index 08ebf8e..7823401 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
@@ -77,7 +77,7 @@
    * Number of disks to include in JSON report per operation. We will return
    * disks with the highest latency.
    */
-  private static final int MAX_DISKS_TO_REPORT = 5;
+  private final int maxDisksToReport;
   private static final String DATANODE_DISK_SEPARATOR = ":";
   private final long reportGenerationIntervalMs;
 
@@ -107,6 +107,9 @@
         DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
         TimeUnit.MILLISECONDS);
+    this.maxDisksToReport = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_MAX_DISKS_TO_REPORT_KEY,
+        DFSConfigKeys.DFS_DATANODE_MAX_DISKS_TO_REPORT_DEFAULT);
     this.reportValidityMs = reportGenerationIntervalMs * 3;
   }
 
@@ -153,7 +156,7 @@
         @Override
         public void run() {
           slowDisksReport = getSlowDisks(diskIDLatencyMap,
-              MAX_DISKS_TO_REPORT, now);
+              maxDisksToReport, now);
 
           cleanUpOldReports(now);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
index 5b30b73..06dd2c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
@@ -79,7 +79,7 @@
    * Number of nodes to include in JSON report. We will return nodes with
    * the highest number of votes from peers.
    */
-  private static final int MAX_NODES_TO_REPORT = 5;
+  private final int maxNodesToReport;
 
   /**
    * Information about peers that have reported a node as being slow.
@@ -103,6 +103,9 @@
         DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT,
         TimeUnit.MILLISECONDS) * 3;
+    this.maxNodesToReport = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
+        DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_DEFAULT);
   }
 
   /**
@@ -193,7 +196,7 @@
    */
   public String getJson() {
     Collection<ReportForJson> validReports = getJsonReports(
-        MAX_NODES_TO_REPORT);
+        maxNodesToReport);
     try {
       return WRITER.writeValueAsString(validReports);
     } catch (JsonProcessingException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0c5bb35..56c65b5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2347,6 +2347,15 @@
 </property>
 
 <property>
+  <name>dfs.datanode.max.nodes.to.report</name>
+  <value>5</value>
+  <description>
+    Number of nodes to include in the slow peer JSON report. We will return
+    the nodes with the highest number of votes from peers.
+  </description>
+</property>
+
+<property>
   <name>dfs.datanode.outliers.report.interval</name>
   <value>30m</value>
   <description>
@@ -2387,6 +2396,15 @@
 </property>
 
 <property>
+  <name>dfs.datanode.max.disks.to.report</name>
+  <value>5</value>
+  <description>
+    Number of disks to include in the slow disk JSON report per operation.
+    We will return the disks with the highest latency.
+  </description>
+</property>
+
+<property>
   <name>hadoop.user.group.metrics.percentiles.intervals</name>
   <value></value>
   <description>