HDFS-3418. svn merge -c 1338830 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.0.0-alpha@1338832 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ca0487d..5f76c51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -300,6 +300,9 @@
HADOOP-8285 HDFS changes for Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+ HDFS-3418. Rename BlockWithLocationsProto datanodeIDs field to storageIDs.
+ (eli)
+
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 92b7858..93fe249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -254,11 +254,11 @@
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
return BlockWithLocationsProto.newBuilder()
.setBlock(convert(blk.getBlock()))
- .addAllDatanodeIDs(Arrays.asList(blk.getDatanodes())).build();
+ .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build();
}
public static BlockWithLocations convert(BlockWithLocationsProto b) {
- return new BlockWithLocations(convert(b.getBlock()), b.getDatanodeIDsList()
+ return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList()
.toArray(new String[0]));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 8628f93..eeb9dd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -205,6 +205,7 @@
private Map<Block, BalancerBlock> globalBlockList
= new HashMap<Block, BalancerBlock>();
private MovedBlocks movedBlocks = new MovedBlocks();
+ // Map storage IDs to BalancerDatanodes
private Map<String, BalancerDatanode> datanodes
= new HashMap<String, BalancerDatanode>();
@@ -620,8 +621,8 @@
synchronized (block) {
// update locations
- for ( String location : blk.getDatanodes() ) {
- BalancerDatanode datanode = datanodes.get(location);
+ for ( String storageID : blk.getStorageIDs() ) {
+ BalancerDatanode datanode = datanodes.get(storageID);
if (datanode != null) { // not an unknown datanode
block.addLocation(datanode);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
index da1c9bd..6c672b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
@@ -21,9 +21,8 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
-/** A class to implement an array of BlockLocations
- * It provide efficient customized serialization/deserialization methods
- * in stead of using the default array (de)serialization provided by RPC
+/**
+ * Maintains an array of blocks and their corresponding storage IDs.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -36,12 +35,12 @@
@InterfaceStability.Evolving
public static class BlockWithLocations {
Block block;
- String datanodeIDs[];
+ String storageIDs[];
/** constructor */
- public BlockWithLocations(Block b, String[] datanodes) {
- block = b;
- datanodeIDs = datanodes;
+ public BlockWithLocations(Block block, String[] storageIDs) {
+ this.block = block;
+ this.storageIDs = storageIDs;
}
/** get the block */
@@ -50,15 +49,15 @@
}
/** get the block's locations */
- public String[] getDatanodes() {
- return datanodeIDs;
+ public String[] getStorageIDs() {
+ return storageIDs;
}
}
private BlockWithLocations[] blocks;
/** Constructor with one parameter */
- public BlocksWithLocations( BlockWithLocations[] blocks ) {
+ public BlocksWithLocations(BlockWithLocations[] blocks) {
this.blocks = blocks;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 09b72b6..4c4bdb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -274,7 +274,7 @@
*/
message BlockWithLocationsProto {
required BlockProto block = 1; // Block
- repeated string datanodeIDs = 2; // Datanodes with replicas of the block
+ repeated string storageIDs = 2; // Storages with replicas of the block
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 72c27bc..026a8559 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -101,18 +101,18 @@
BlockWithLocations[] locs;
locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
assertEquals(locs.length, 2);
- assertEquals(locs[0].getDatanodes().length, 2);
- assertEquals(locs[1].getDatanodes().length, 2);
+ assertEquals(locs[0].getStorageIDs().length, 2);
+ assertEquals(locs[1].getStorageIDs().length, 2);
// get blocks of size BlockSize from dataNodes[0]
locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
assertEquals(locs.length, 1);
- assertEquals(locs[0].getDatanodes().length, 2);
+ assertEquals(locs[0].getStorageIDs().length, 2);
// get blocks of size 1 from dataNodes[0]
locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
assertEquals(locs.length, 1);
- assertEquals(locs[0].getDatanodes().length, 2);
+ assertEquals(locs[0].getStorageIDs().length, 2);
// get blocks of size 0 from dataNodes[0]
getBlocksWithException(namenode, dataNodes[0], 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index a6280d3..ced16b8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -161,7 +161,7 @@
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
assertEquals(locs1.getBlock(), locs2.getBlock());
- assertTrue(Arrays.equals(locs1.getDatanodes(), locs2.getDatanodes()));
+ assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
}
@Test