HDFS-1875. MiniDFSCluster hard-codes dfs.datanode.address to localhost. Contributed by Eric Payne.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@1133181 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/CHANGES.txt b/CHANGES.txt
index cba750c..d3da283 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -287,6 +287,9 @@
IMPROVEMENTS
+ HDFS-1875. MiniDFSCluster hard-codes dfs.datanode.address to localhost
+ (Eric Payne via mattf)
+
HDFS-2019. Fix all the places where Java method File.list is used with
FileUtil.list API (Bharath Mundlapudi via mattf)
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java b/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
index 23e1e29..8e19f45 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
@@ -75,10 +75,12 @@
" [-inject startingBlockId numBlocksPerDN]" +
" [-r replicationFactorForInjectedBlocks]" +
" [-d dataNodeDirs]\n" +
+ " [-checkDataNodeAddrConfig]\n" +
" Default datanode direcory is " + DATANODE_DIRS + "\n" +
" Default replication factor for injected blocks is 1\n" +
" Defaul rack is used if -racks is not specified\n" +
- " Data nodes are simulated if -simulated OR conf file specifies simulated\n";
+ " Data nodes are simulated if -simulated OR conf file specifies simulated\n" +
+ "        -checkDataNodeAddrConfig tells DataNodeConf to use data node addresses from conf file, if it is set. If not set, use 'localhost'.";
static void printUsageExit() {
@@ -97,6 +99,7 @@
long startingBlockId = 1;
int numBlocksPerDNtoInject = 0;
int replication = 1;
+ boolean checkDataNodeAddrConfig = false;
Configuration conf = new HdfsConfiguration();
@@ -139,6 +142,8 @@
printUsageExit("Missing number of blocks to inject");
}
numBlocksPerDNtoInject = Integer.parseInt(args[i]);
+ } else if (args[i].equals("-checkDataNodeAddrConfig")) {
+ checkDataNodeAddrConfig = true;
} else {
printUsageExit();
}
@@ -186,7 +191,7 @@
}
try {
mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR,
- rack4DataNode);
+ rack4DataNode, null, null, false, checkDataNodeAddrConfig);
if (inject) {
long blockSize = 10;
System.out.println("Injecting " + numBlocksPerDNtoInject +
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java b/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
index c8d8a22..26d79d2 100644
--- a/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -752,7 +752,41 @@
String[] racks, String[] hosts,
long[] simulatedCapacities,
boolean setupHostsFile) throws IOException {
+ startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+ simulatedCapacities, setupHostsFile, false);
+ }
+ /**
+ * Modify the config and start up additional DataNodes. The info port for
+ * DataNodes is guaranteed to use a free port.
+ *
+ * Data nodes can run with the name node in the mini cluster or
+ * a real name node. For example, running with a real name node is useful
+ * when running simulated data nodes with a real name node.
+ * If minicluster's name node is null assume that the conf has been
+ * set with the right address:port of the name node.
+ *
+ * @param conf the base configuration to use in starting the DataNodes. This
+ * will be modified as necessary.
+ * @param numDataNodes Number of DataNodes to start; may be zero
+ * @param manageDfsDirs if true, the data directories for DataNodes will be
+ * created and dfs.datanode.data.dir will be set in the conf
+ * @param operation the operation with which to start the DataNodes. If null
+ * or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+ * @param racks array of strings indicating the rack that each DataNode is on
+ * @param hosts array of strings indicating the hostnames for each DataNode
+ * @param simulatedCapacities array of capacities of the simulated data nodes
+ * @param setupHostsFile add new nodes to dfs hosts files
+ * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
+ *
+ * @throws IllegalStateException if NameNode has been shutdown
+ */
+ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+ boolean manageDfsDirs, StartupOption operation,
+ String[] racks, String[] hosts,
+ long[] simulatedCapacities,
+ boolean setupHostsFile,
+ boolean checkDataNodeAddrConfig) throws IOException {
int curDatanodesNum = dataNodes.size();
// for mincluster's the default initialDelay for BRs is 0
if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
@@ -792,7 +826,7 @@
for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
Configuration dnConf = new HdfsConfiguration(conf);
// Set up datanode address
- setupDatanodeAddress(dnConf, setupHostsFile);
+ setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
File dir1 = getStorageDir(i, 0);
File dir2 = getStorageDir(i, 1);
@@ -1791,7 +1825,8 @@
return port;
}
- private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile) throws IOException {
+ private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
+ boolean checkDataNodeAddrConfig) throws IOException {
if (setupHostsFile) {
String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
if (hostsFile.length() == 0) {
@@ -1799,13 +1834,23 @@
}
// Setup datanode in the include file, if it is defined in the conf
String address = "127.0.0.1:" + getFreeSocketPort();
- conf.set("dfs.datanode.address", address);
+ if (checkDataNodeAddrConfig) {
+ conf.setIfUnset("dfs.datanode.address", address);
+ } else {
+ conf.set("dfs.datanode.address", address);
+ }
addToFile(hostsFile, address);
LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
} else {
- conf.set("dfs.datanode.address", "127.0.0.1:0");
- conf.set("dfs.datanode.http.address", "127.0.0.1:0");
- conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+ if (checkDataNodeAddrConfig) {
+ conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
+ conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0");
+ conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0");
+ } else {
+ conf.set("dfs.datanode.address", "127.0.0.1:0");
+ conf.set("dfs.datanode.http.address", "127.0.0.1:0");
+ conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+ }
}
}
diff --git a/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
new file mode 100644
index 0000000..1d43ea7
--- /dev/null
+++ b/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Test the MiniDFSCluster functionality that allows "dfs.datanode.address",
+ * "dfs.datanode.http.address", and "dfs.datanode.ipc.address" to be
+ * configurable. The MiniDFSCluster.startDataNodes() API now has a parameter
+ * that will check these properties if told to do so.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+
+
+public class TestDFSAddressConfig extends TestCase {
+
+ public void testDFSAddressConfig() throws IOException {
+ Configuration conf = new HdfsConfiguration();
+
+ /*-------------------------------------------------------------------------
+ * By default, the DataNode socket address should be localhost (127.0.0.1).
+ *------------------------------------------------------------------------*/
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+ cluster.waitActive();
+
+ ArrayList<DataNode> dns = cluster.getDataNodes();
+ DataNode dn = dns.get(0);
+
+ String selfSocketAddr = dn.getSelfAddr().toString();
+ System.out.println("DN Self Socket Addr == " + selfSocketAddr);
+ assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+
+ /*-------------------------------------------------------------------------
+ * Shut down the datanodes, reconfigure, and bring them back up.
+ * Even if told to use the configuration properties for dfs.datanode,
+ * MiniDFSCluster.startDataNodes() should use localhost as the default if
+ * the dfs.datanode properties are not set.
+ *------------------------------------------------------------------------*/
+ for (int i = 0; i < dns.size(); i++) {
+ DataNodeProperties dnp = cluster.stopDataNode(i);
+ assertNotNull("Should have been able to stop simulated datanode", dnp);
+ }
+
+ conf.unset("dfs.datanode.address");
+ conf.unset("dfs.datanode.http.address");
+ conf.unset("dfs.datanode.ipc.address");
+
+ cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
+ null, null, null, false, true);
+
+ dns = cluster.getDataNodes();
+ dn = dns.get(0);
+
+ selfSocketAddr = dn.getSelfAddr().toString();
+ System.out.println("DN Self Socket Addr == " + selfSocketAddr);
+ // assert that default self socket address is 127.0.0.1
+ assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+
+ /*-------------------------------------------------------------------------
+ * Shut down the datanodes, reconfigure, and bring them back up.
+ * This time, modify the dfs.datanode properties and make sure that they
+ * are used to configure sockets by MiniDFSCluster.startDataNodes().
+ *------------------------------------------------------------------------*/
+ for (int i = 0; i < dns.size(); i++) {
+ DataNodeProperties dnp = cluster.stopDataNode(i);
+ assertNotNull("Should have been able to stop simulated datanode", dnp);
+ }
+
+ conf.set("dfs.datanode.address","0.0.0.0:0");
+ conf.set("dfs.datanode.http.address","0.0.0.0:0");
+ conf.set("dfs.datanode.ipc.address","0.0.0.0:0");
+
+ cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
+ null, null, null, false, true);
+
+ dns = cluster.getDataNodes();
+ dn = dns.get(0);
+
+ selfSocketAddr = dn.getSelfAddr().toString();
+ System.out.println("DN Self Socket Addr == " + selfSocketAddr);
+ // assert that default self socket address is 0.0.0.0
+ assertTrue(selfSocketAddr.startsWith("/0.0.0.0:"));
+
+ cluster.shutdown();
+ }
+}