HDFS-7285. Erasure Coding Support inside HDFS
Fixed compilation errors after rebase. (Vinayakumar B)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingZone.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 59f14d5..d7b292e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolarent;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
import org.apache.hadoop.http.HttpConfig;
@@ -440,7 +440,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_REPLICATION_MAX_KEY = "dfs.replication.max";
public static final int DFS_REPLICATION_MAX_DEFAULT = 512;
public static final String DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY = "dfs.block.placement.ec.classname";
- public static final Class<BlockPlacementPolicyRackFaultTolarent> DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT = BlockPlacementPolicyRackFaultTolarent.class;
+ public static final Class<BlockPlacementPolicyRackFaultTolerant> DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT = BlockPlacementPolicyRackFaultTolerant.class;
public static final String DFS_DF_INTERVAL_KEY = "dfs.df.interval";
public static final int DFS_DF_INTERVAL_DEFAULT = 60000;
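For reviewers: the corrected default is consumed through Hadoop's standard Configuration.getClass pattern for *.classname keys. A minimal sketch of that lookup against the patched tree (the wiring itself is not part of this hunk, so treat it as illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;

    public class EcPlacementPolicyLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Resolves dfs.block.placement.ec.classname, falling back to the
        // correctly spelled BlockPlacementPolicyRackFaultTolerant default.
        Class<? extends BlockPlacementPolicy> clazz = conf.getClass(
            DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY,
            DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT,
            BlockPlacementPolicy.class);
        System.out.println("EC placement policy: " + clazz.getName());
      }
    }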
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 689c0ee..aca5120 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -701,7 +701,7 @@ private long getBlockList() throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("getBlocks(" + getDatanodeInfo() + ", "
+ StringUtils.TraditionalBinaryPrefix.long2String(size, "B", 2)
- + ") returns " + newBlocks.getBlocks().length + " blocks.");
+ + ") returns " + newBlksLocs.getBlocks().length + " blocks.");
}
long bytesReceived = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 28cf383..eed05c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3404,7 +3404,7 @@ public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
}
CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
- .get(new CachedBlock(block.getBlockId(), (short) 0, false));
+ .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
if (cblock != null) {
boolean removed = false;
removed |= node.getPendingCached().remove(cblock);
@@ -3412,7 +3412,7 @@ public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
removed |= node.getPendingUncached().remove(cblock);
if (removed) {
blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching "
- + "related lists on node {}", block, node);
+ + "related lists on node {}", storedBlock, node);
}
}
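Note on the two fixes above: after the rebase, removeStoredBlock no longer has a local named block, so both the cache lookup and the log statement must use the storedBlock parameter. The lookup works because CachedBlock keys on block ID alone; the replication and mark arguments are placeholders. A hedged sketch of that key pattern:

    // Illustrative fragment, assuming CachedBlock equality/hashing is based
    // on the block ID only (hence the dummy (short) 0 and false fields).
    CachedBlock key =
        new CachedBlock(storedBlock.getBlockId(), (short) 0, false);
    CachedBlock cblock =
        namesystem.getCacheManager().getCachedBlocks().get(key);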
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 863bd4f..c4cfd34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -182,10 +182,12 @@ static GetBlockLocationsResult getBlockLocations(
final FileEncryptionInfo feInfo = isReservedName ? null
: fsd.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);
+ final ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
+ fsd.getFSNamesystem(), iip);
final LocatedBlocks blocks = bm.createLocatedBlocks(
inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
- length, needBlockToken, iip.isSnapshot(), feInfo);
+ length, needBlockToken, iip.isSnapshot(), feInfo, ecZone);
// Set caching information for the located blocks.
for (LocatedBlock lb : blocks.getLocatedBlocks()) {
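The hunk above threads the file's erasure coding zone into block-location construction. Inferred shape of the updated BlockManager.createLocatedBlocks after this patch, reconstructed from the call site (parameter names are illustrative, not verbatim):

    // Inferred signature: the trailing ErasureCodingZone lets the
    // BlockManager emit striped LocatedBlocks for files in an EC zone
    // (null presumably meaning a plain replicated file).
    LocatedBlocks createLocatedBlocks(BlockInfo[] blocks, long fileSize,
        boolean isFileUnderConstruction, long offset, long length,
        boolean needBlockToken, boolean inSnapshot, FileEncryptionInfo feInfo,
        ErasureCodingZone ecZone) throws IOException;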
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index a4c5948..3c149d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -73,6 +73,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters;
import org.apache.hadoop.hdfs.server.balancer.Balancer.Result;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index bae4f1d..835a24d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -93,12 +93,12 @@ public void testReplaceStorage() throws Exception {
// Create a few dummy blocks and add them to the first storage.
for (int i = 0; i < NUM_BLOCKS; ++i) {
blockInfos[i] = new BlockInfoContiguous((short) 3);
- storage1.addBlock(blockInfos[i]);
+ storage1.addBlock(blockInfos[i], blockInfos[i]);
}
// Try to move one of the blocks to a different storage.
- boolean added =
- storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
+ boolean added = storage2.addBlock(blockInfos[NUM_BLOCKS / 2],
+ blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
Assert.assertThat(added, is(false));
Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
@@ -119,7 +119,7 @@ public void testBlockListMoveToHead() throws Exception {
for (int i = 0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3));
- dd.addBlock(blockInfoList.get(i));
+ dd.addBlock(blockInfoList.get(i), blockInfoList.get(i));
// index of the datanode should be 0
assertEquals("Find datanode should be 0", 0, blockInfoList.get(i)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
index ca27bb7..58ce1b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
@@ -63,16 +63,16 @@ public void testBlocksCounter() throws Exception {
DatanodeStorageInfo[] storages = dd.getStorageInfos();
assertTrue(storages.length > 0);
// add first block
- assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
+ assertTrue(storages[0].addBlock(blk, blk) == AddBlockResult.ADDED);
assertEquals(1, dd.numBlocks());
// remove a non-existent block
assertFalse(dd.removeBlock(blk1));
assertEquals(1, dd.numBlocks());
// add an existent block
- assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
+ assertFalse(storages[0].addBlock(blk, blk) == AddBlockResult.ADDED);
assertEquals(1, dd.numBlocks());
// add second block
- assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
+ assertTrue(storages[0].addBlock(blk1, blk1) == AddBlockResult.ADDED);
assertEquals(2, dd.numBlocks());
// remove first block
assertTrue(dd.removeBlock(blk));
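The TestBlockInfo and TestDatanodeDescriptor fixes above track the same API change: DatanodeStorageInfo.addBlock now also takes the block as reported by the datanode, presumably so striped storage can derive the replica's index within its block group. For contiguous blocks the two arguments are the same object, which is why the tests simply pass each BlockInfo twice. Inferred signature (names illustrative):

    // Inferred from the call sites above; contiguous callers pass the
    // stored BlockInfo for both parameters.
    AddBlockResult addBlock(BlockInfo storedBlock, Block reportedBlock);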
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index eec957f..4324be7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1248,7 +1248,8 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo(
dataNodes[0], new DatanodeStorage("s1"))};
final BlockInfoContiguousUnderConstruction ucBlock =
- info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
+ ((BlockInfoContiguous) info)
+ .convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
storageAry);
DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);
DatanodeDescriptor dn = mock(DatanodeDescriptor.class);
@@ -1256,8 +1257,8 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL);
when(storage.getDatanodeDescriptor()).thenReturn(dn);
when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
- when(storage.addBlock(any(BlockInfo.class))).thenReturn
- (DatanodeStorageInfo.AddBlockResult.ADDED);
+ when(storage.addBlock(any(BlockInfo.class), any(BlockInfo.class)))
+ .thenReturn(DatanodeStorageInfo.AddBlockResult.ADDED);
ucBlock.addStorage(storage, ucBlock);
BlockInfo lastBlk = mbc.getLastBlock();
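The added cast suggests that on this branch BlockInfo has become the common base of contiguous and striped block metadata, with convertToBlockUnderConstruction remaining contiguous-only; the mock fix is the same two-argument addBlock change covered above. A skeleton of the assumed hierarchy (illustrative only, not the branch sources):

    // Assumed class split; the real classes live in
    // org.apache.hadoop.hdfs.server.blockmanagement.
    abstract class BlockInfo extends Block {
      // storage bookkeeping shared by contiguous and striped blocks
    }

    class BlockInfoContiguous extends BlockInfo {
      BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction(
          BlockUCState state, DatanodeStorageInfo[] targets) {
        return null; // placeholder; see the branch sources for the real logic
      }
    }

    class BlockInfoStriped extends BlockInfo {
      // erasure-coded block-group metadata (schema, cell size)
    }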
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index c5262d4..ec3d924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -162,7 +162,8 @@ public void testDeadNodeAsBlockTarget() throws Exception {
// choose the targets, but local node should not get selected as this is not
// part of the cluster anymore
DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
- clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7);
+ clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
+ false);
for (DatanodeStorageInfo datanodeStorageInfo : results) {
assertFalse("Dead node should not be choosen", datanodeStorageInfo
.getDatanodeDescriptor().equals(clientNode));
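The new trailing false matches a boolean parameter added to chooseTarget4NewBlock on this branch, presumably a striped/EC placement flag. Inferred signature (the last parameter name is a guess):

    // Inferred from the call site above; the boolean is assumed to choose
    // striped (EC) vs. contiguous target placement.
    DatanodeStorageInfo[] chooseTarget4NewBlock(String src, int numOfReplicas,
        Node client, Set<Node> excludedNodes, long blocksize,
        List<String> favoredNodes, byte storagePolicyID,
        boolean isStriped) throws IOException;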
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index 9c585a4..f81005f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -28,7 +28,6 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.junit.After;
@@ -69,7 +68,7 @@ public void setUp() throws IOException {
dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
- dfs.setStoragePolicy(ecDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
+ dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
}
@After
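The final hunk drops HdfsServerConstants in favor of the client-visible HdfsConstants, which also carries the storage policy names. Hedged sketch of the resulting client-side setup, mirroring setUp above (cellSize 0 meaning "use the schema default" is an assumption):

    // Illustrative client calls against the branch API shown in this hunk.
    dfs.mkdirs(ecDir);
    dfs.getClient().createErasureCodingZone(ecDir.toString(), ecSchema, 0);
    dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
    dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
    // HOT keeps every block on DISK, so the DISK type quota is the one
    // that binds for this test.
    dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);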