HDFS-16772. refreshHostsReader should use the latest configuration (#4890)

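Previously, DatanodeManager#refreshNodes(Configuration conf) only called
hostConfigManager.setConf(conf) inside the if (conf == null) branch, i.e. only
when a brand-new HdfsConfiguration had to be created. A caller-supplied
configuration therefore never reached the host config manager, and refresh()
kept reading the include/exclude files named by the configuration the NameNode
was started with. This change moves setConf(conf) out of the null check so the
latest configuration is always applied before refresh(). The tests are updated
to invoke DatanodeManager#refreshNodes(conf) directly with the test
configuration, and TestHostsFiles#testNewHostAndExcludeFile verifies that a new
configuration pointing at a different dfs.hosts file takes effect on refresh.

The snippet below is a minimal illustrative sketch of the behaviour this patch
enables, assuming a running MiniDFSCluster named "cluster"; the include/exclude
file paths are hypothetical and only stand in for freshly written host files.

    // Point a fresh Configuration at new include/exclude files
    // (paths are hypothetical, for illustration only).
    Configuration newConf = new Configuration(cluster.getConfiguration(0));
    newConf.set(DFSConfigKeys.DFS_HOSTS, "/tmp/new-dfs.hosts");
    newConf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "/tmp/new-dfs.hosts.exclude");

    // With this change, hostConfigManager.setConf(newConf) runs unconditionally,
    // so refresh() re-reads the host files named by newConf rather than the
    // configuration the NameNode was started with.
    cluster.getNamesystem()
        .getBlockManager()
        .getDatanodeManager()
        .refreshNodes(newConf);
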
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a7a2e54..b2c5cb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1327,8 +1327,8 @@
     // Update the file names and refresh internal includes and excludes list.
     if (conf == null) {
       conf = new HdfsConfiguration();
-      this.hostConfigManager.setConf(conf);
     }
+    this.hostConfigManager.setConf(conf);
     this.hostConfigManager.refresh();
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index de738ee..9638f71e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -81,21 +81,21 @@
       datanode.setUpgradeDomain(ud1);
       hostsFileWriter.initIncludeHosts(
           new DatanodeAdminProperties[]{datanode});
-      client.refreshNodes();
+      cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
       DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
       assertEquals(all[0].getUpgradeDomain(), ud1);
 
       datanode.setUpgradeDomain(null);
       hostsFileWriter.initIncludeHosts(
           new DatanodeAdminProperties[]{datanode});
-      client.refreshNodes();
+      cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
       all = client.datanodeReport(DatanodeReportType.ALL);
       assertEquals(all[0].getUpgradeDomain(), null);
 
       datanode.setUpgradeDomain(ud2);
       hostsFileWriter.initIncludeHosts(
           new DatanodeAdminProperties[]{datanode});
-      client.refreshNodes();
+      cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
       all = client.datanodeReport(DatanodeReportType.ALL);
       assertEquals(all[0].getUpgradeDomain(), ud2);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 7bb2888..420635e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -2310,7 +2310,7 @@
       dnProp.setPort(datanodeID.getXferPort());
       dnProp.setUpgradeDomain(upgradeDomain);
       hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[]{dnProp});
-      cluster.getFileSystem().refreshNodes();
+      cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
     }
 
     // create files
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index e86413d..78ebcb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
@@ -175,4 +176,38 @@
       hostsFileWriter.cleanup();
     }
   }
+
+  @Test
+  public void testNewHostAndExcludeFile() throws Exception {
+    Configuration conf = getConf();
+
+    HostsFileWriter writer1 = new HostsFileWriter();
+    writer1.initialize(conf, "old_temp/decommission");
+    writer1.initIncludeHosts(new String[]{"localhost:52", "127.0.0.1:7777"});
+
+    // Write all hosts to a new dfs.hosts file.
+    HostsFileWriter writer2 = new HostsFileWriter();
+    Configuration newConf = new Configuration(getConf());
+    writer2.initialize(newConf, "new_temp/decommission");
+    writer2.initIncludeHosts(new String[]{
+        "localhost:52", "127.0.0.1:7777", "localhost:100"});
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
+      assertEquals(2, ns.getNumDeadDataNodes());
+      assertEquals(0, ns.getNumLiveDataNodes());
+
+      ns.getBlockManager().getDatanodeManager().refreshNodes(newConf);
+      assertEquals(3, ns.getNumDeadDataNodes());
+      assertEquals(0, ns.getNumLiveDataNodes());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      writer1.cleanup();
+      writer2.cleanup();
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
index 0421941..abd26d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
@@ -69,11 +69,11 @@
   static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
   private MiniDFSCluster cluster = null;
   private HostsFileWriter hostsFileWriter = new HostsFileWriter();
+  private Configuration conf = new HdfsConfiguration();
 
   @Before
   public void setup() throws IOException {
     StaticMapping.resetMap();
-    Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
     conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
@@ -130,7 +130,7 @@
     datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     hostsFileWriter.initIncludeHosts(datanodes);
-    cluster.getFileSystem().refreshNodes();
+    cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
 
     expectedDatanodeIDs.clear();
     expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
@@ -169,7 +169,7 @@
     datanodes[2].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     datanodes[3].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
     hostsFileWriter.initIncludeHosts(datanodes);
-    cluster.getFileSystem().refreshNodes();
+    cluster.getNamesystem(0).getBlockManager().getDatanodeManager().refreshNodes(conf);
 
     expectedDatanodeIDs.clear();
     expectedDatanodeIDs.add(cluster.getDataNodes().get(0).getDatanodeId());