HDFS-1999. Tests use deprecated configs. Contributed by Aaron T. Myers


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@1127823 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/CHANGES.txt b/CHANGES.txt
index 7219b58..725b739 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -624,6 +624,8 @@
     HDFS-1983. Fix path display for copy and rm commands in TestHDFSCLI and
     TestDFSShell. (Daryn Sharp via todd)
 
+    HDFS-1999. Tests use deprecated configs. (Aaron T. Myers via eli)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java b/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
index eb58db5..75e751f 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -305,7 +305,7 @@
    * Servers will be started on free ports.
    * <p>
    * The caller must manage the creation of NameNode and DataNode directories
-   * and have already set dfs.name.dir and dfs.data.dir in the given conf.
+   * and have already set dfs.namenode.name.dir and dfs.datanode.data.dir in the given conf.
    * 
    * @param conf the base configuration to use in starting the servers.  This
    *          will be modified as necessary.
@@ -377,7 +377,7 @@
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -407,7 +407,7 @@
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -439,9 +439,9 @@
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param manageNameDfsDirs if true, the data directories for servers will be
-   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   *          created and dfs.namenode.name.dir and dfs.datanode.data.dir will be set in the conf
    * @param manageDataDfsDirs if true, the data directories for datanodes will
-   *          be created and dfs.data.dir set to same in the conf
+   *          be created and dfs.datanode.data.dir set to same in the conf
    * @param operation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -706,7 +706,7 @@
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -737,7 +737,7 @@
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -862,7 +862,7 @@
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
@@ -892,7 +892,7 @@
    *          will be modified as necessary.
    * @param numDataNodes Number of DataNodes to start; may be zero
    * @param manageDfsDirs if true, the data directories for DataNodes will be
-   *          created and dfs.data.dir will be set in the conf
+   *          created and dfs.datanode.data.dir will be set in the conf
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
index 2b6e93b..e0e3013 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
@@ -110,7 +110,7 @@
   
   /**
    * Sets up the storage directories for namenode as defined by
-   * dfs.name.dir. For each element in dfs.name.dir, the subdirectories 
+   * dfs.namenode.name.dir. For each element in dfs.namenode.name.dir, the subdirectories 
    * represented by the first four elements of the <code>state</code> array
    * will be created and populated.
    * 
@@ -139,7 +139,7 @@
   
   /**
    * Sets up the storage directories for a datanode under
-   * dfs.data.dir. For each element in dfs.data.dir, the subdirectories 
+   * dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories 
    * represented by the first four elements of the <code>state</code> array 
    * will be created and populated. 
    * See {@link UpgradeUtilities#createDataNodeStorageDirs()}
@@ -167,7 +167,7 @@
   
   /**
    * Sets up the storage directories for a block pool under
-   * dfs.data.dir. For each element in dfs.data.dir, the subdirectories 
+   * dfs.datanode.data.dir. For each element in dfs.datanode.data.dir, the subdirectories 
    * represented by the first four elements of the <code>state</code> array 
    * will be created and populated. 
    * See {@link UpgradeUtilities#createBlockPoolStorageDirs()}
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index d594de1..9950062 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -135,7 +135,8 @@
     
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         fileAsURI(new File(hdfsDir, "name2")).toString());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     
     // Start BackupNode
     String[] args = new String [] { StartupOption.BACKUP.getName() };
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java b/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
index 93af348..ac62f06 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -45,7 +45,7 @@
    * Name-node should stay in automatic safe-mode.</li>
    * <li>Enter safe mode manually.</li>
    * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and 
    * verify that the name-node is still in safe mode.</li>
    * </ol>
    *  
@@ -92,7 +92,7 @@
       
       LOG.info("Datanode is started.");
 
-      // wait longer than dfs.safemode.extension
+      // wait longer than dfs.namenode.safemode.extension
       try {
         Thread.sleep(2000);
       } catch (InterruptedException ignored) {}
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java b/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 5685317..b84e4ff 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -180,7 +180,7 @@
   }
   
   /**
-   * Initialize dfs.name.dir and dfs.data.dir with the specified number of
+   * Initialize dfs.namenode.name.dir and dfs.datanode.data.dir with the specified number of
    * directory entries. Also initialize dfs.blockreport.intervalMsec.
    */
   public static Configuration initializeStorageStateConf(int numDirs,
@@ -305,7 +305,7 @@
   }
   
   /**
-   * Simulate the <code>dfs.name.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.namenode.name.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of namenode storage directory that comes from a singleton
    * namenode master (that contains edits, fsimage, version and time files). 
@@ -332,7 +332,7 @@
   }  
   
   /**
-   * Simulate the <code>dfs.data.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of datanode storage directory that comes from a singleton
    * datanode master (that contains version and block files). If the destination
@@ -359,7 +359,7 @@
   }
   
   /**
-   * Simulate the <code>dfs.data.dir</code> of a populated DFS filesystem.
+   * Simulate the <code>dfs.datanode.data.dir</code> of a populated DFS filesystem.
    * This method populates for each parent directory, <code>parent/dirName</code>
    * with the content of block pool storage directory that comes from a singleton
    * datanode master (that contains version and block files). If the destination
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt b/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
index 6f79f43..3d1b67d 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
@@ -40,7 +40,7 @@
 # some recoverable errors (i.e. corrupt or missing .crc files).
 #
 # A similar set of files exist in two different DFS directories. 
-# For e.g. "top-dir-1Mb-512" contains files created with dfs.block.size of 1Mb 
+# E.g. "top-dir-1Mb-512" contains files created with dfs.blocksize of 1Mb 
 # and io.bytes.per.checksum of 512.
 #
 # In the future, when Hadoop project no longer supports upgrade from
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 3fd7872..809481b 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -181,7 +181,7 @@
       DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
     FsPermission expected = new FsPermission(permStr);
 
-    // Check permissions on directories in 'dfs.data.dir'
+    // Check permissions on directories in 'dfs.datanode.data.dir'
     FileSystem localFS = FileSystem.getLocal(conf);
     for (DataNode dn : cluster.getDataNodes()) {
       String[] dataDirs =
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index b651737..4db05ff 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -36,7 +36,7 @@
  *   
  *   Create a name node's edits log in /tmp/EditsLogOut.
  *   The file /tmp/EditsLogOut/current/edits can be copied to a name node's
- *   dfs.name.dir/current direcotry and the name node can be started as usual.
+ *   dfs.namenode.name.dir/current directory and the name node can be started as usual.
  *   
  *   The files are created in /createdViaInjectingInEditsLog
  *   The file names contain the starting and ending blockIds; hence once can 
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 134195f..02ab5e6 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -85,7 +85,8 @@
     Configuration c = new HdfsConfiguration(conf);
     String dirs = getBackupNodeDir(t, i);
     c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
-    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");
+    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     return (BackupNode)NameNode.createNameNode(new String[]{t.getName()}, c);
   }
 
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index ec926c4..363a844 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -30,8 +30,8 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 /**
- * This class tests various combinations of dfs.name.dir 
- * and dfs.name.edits.dir configurations.
+ * This class tests various combinations of dfs.namenode.name.dir 
+ * and dfs.namenode.edits.dir configurations.
  */
 public class TestNameEditsConfigs extends TestCase {
   static final long SEED = 0xDEADBEEFL;
@@ -100,7 +100,7 @@
   }
 
   /**
-   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * The test creates files and restarts cluster with different configs.
    * 1. Starts cluster with shared name and edits dirs
    * 2. Restarts cluster by adding additional (different) name and edits dirs
@@ -127,7 +127,7 @@
     File checkpointEditsDir = new File(base_dir, "secondedits");
     File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
     
-    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
@@ -154,7 +154,7 @@
       secondary.shutdown();
     }
 
-    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf =  new HdfsConfiguration();
     assertTrue(newNameDir.mkdir());
     assertTrue(newEditsDir.mkdir());
@@ -282,7 +282,7 @@
   }
 
   /**
-   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
    * This test tries to simulate failure scenarios.
    * 1. Start cluster with shared name and edits dir
    * 2. Restart cluster by adding separate name and edits dirs
@@ -303,7 +303,7 @@
     File newEditsDir = new File(base_dir, "edits");
     File nameAndEdits = new File(base_dir, "name_and_edits");
     
-    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
@@ -325,7 +325,7 @@
       cluster.shutdown();
     }
 
-    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf =  new HdfsConfiguration();
     assertTrue(newNameDir.mkdir());
     assertTrue(newEditsDir.mkdir());
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
index cdfe265..875fc33 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
@@ -49,7 +49,7 @@
    * Name-node should stay in automatic safe-mode.</li>
    * <li>Enter safe mode manually.</li>
    * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and 
    * verify that the name-node is still in safe mode.</li>
    * </ol>
    *  
@@ -96,7 +96,7 @@
       
       LOG.info("Datanode is started.");
 
-      // wait longer than dfs.safemode.extension
+      // wait longer than dfs.namenode.safemode.extension
       try {
         Thread.sleep(2000);
       } catch (InterruptedException ignored) {}
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 4b816d7..f785477 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -367,11 +367,12 @@
     LOG.info("Test compressing image.");
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     File base_dir = new File(System.getProperty(
         "test.build.data", "build/test/data"), "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
-    conf.setBoolean("dfs.permissions", false);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(base_dir, "name").getPath());
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 
     DFSTestUtil.formatNameNode(conf);
 
@@ -426,11 +427,12 @@
   private void testImageChecksum(boolean compress) throws Exception {
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
-    conf.set("dfs.http.address", "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     File base_dir = new File(
         System.getProperty("test.build.data", "build/test/data"), "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
-    conf.setBoolean("dfs.permissions", false);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(base_dir, "name").getPath());
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
     if (compress) {
       conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
     }
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
index b4d0e51..23922ae 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
@@ -340,7 +340,7 @@
 
       FSImage fsi = cluster.getNameNode().getFSImage();
 
-      // it is started with dfs.name.dir.restore set to true (in SetUp())
+      // it is started with dfs.namenode.name.dir.restore set to true (in SetUp())
       boolean restore = fsi.getStorage().getRestoreFailedStorage();
       LOG.info("Restore is " + restore);
       assertEquals(restore, true);
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 655a86c..c70002b 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -89,7 +89,7 @@
   }
 
   private void updateMetrics() throws Exception {
-    // Wait for metrics update (corresponds to dfs.replication.interval
+    // Wait for metrics update (corresponds to dfs.namenode.replication.interval
     // for some block related metrics to get updated)
     Thread.sleep(1000);
   }
diff --git a/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java b/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
index 0b32915..bf29605 100644
--- a/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
+++ b/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
@@ -34,10 +34,12 @@
   }
 
   public String[] getHDFSDataDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings("dfs.data.dir");
+    return getProxy().getDaemonConf().getStrings(
+        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
   }
 
   public String getHDFSNameDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings("dfs.name.dir")[0];
+    return getProxy().getDaemonConf().getStrings(
+        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0];
   }
 }