HDDS-567. Rename Mapping to ContainerManager in SCM. Contributed by Nanda kumar.
(cherry picked from commit 095c269620e01ce46832ea25e696c0ab71613ea3)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 4777016..30740c7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmUtils;
-import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -70,7 +70,7 @@
// by itself and does not rely on the Block service offered by SCM.
private final NodeManager nodeManager;
- private final Mapping containerManager;
+ private final ContainerManager containerManager;
private final long containerSize;
@@ -92,7 +92,7 @@
* @throws IOException
*/
public BlockManagerImpl(final Configuration conf,
- final NodeManager nodeManager, final Mapping containerManager,
+ final NodeManager nodeManager, final ContainerManager containerManager,
EventPublisher eventPublisher)
throws IOException {
this.nodeManager = nodeManager;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index 8702a42..c86f9cd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -17,7 +17,7 @@
package org.apache.hadoop.hdds.scm.block;
import com.google.common.collect.ArrayListMultimap;
-import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
@@ -42,15 +42,15 @@
private int maximumAllowedTXNum;
// Current counter of inserted TX.
private int currentTXNum;
- private Mapping mappingService;
+ private ContainerManager containerManager;
// A list of TXs mapped to a certain datanode ID.
private final ArrayListMultimap<UUID, DeletedBlocksTransaction>
transactions;
- DatanodeDeletedBlockTransactions(Mapping mappingService,
+ DatanodeDeletedBlockTransactions(ContainerManager containerManager,
int maximumAllowedTXNum, int nodeNum) {
this.transactions = ArrayListMultimap.create();
- this.mappingService = mappingService;
+ this.containerManager = containerManager;
this.maximumAllowedTXNum = maximumAllowedTXNum;
this.nodeNum = nodeNum;
}
@@ -60,7 +60,7 @@
Pipeline pipeline = null;
try {
ContainerWithPipeline containerWithPipeline =
- mappingService.getContainerWithPipeline(tx.getContainerID());
+ containerManager.getContainerWithPipeline(tx.getContainerID());
if (containerWithPipeline.getContainerInfo().isContainerOpen()
|| containerWithPipeline.getPipeline().isEmpty()) {
return false;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 68435d1..5d3afd5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -29,7 +29,7 @@
.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.scm.command
.CommandStatusReportHandler.DeleteBlockStatus;
-import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -92,15 +92,15 @@
private final int maxRetry;
private final MetadataStore deletedStore;
- private final Mapping containerManager;
+ private final ContainerManager containerManager;
private final Lock lock;
// The latest id of deleted blocks in the db.
private long lastTxID;
// Maps txId to set of DNs which are successful in committing the transaction
private Map<Long, Set<UUID>> transactionToDNsCommitMap;
- public DeletedBlockLogImpl(Configuration conf, Mapping containerManager)
- throws IOException {
+ public DeletedBlockLogImpl(Configuration conf,
+ ContainerManager containerManager) throws IOException {
maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index b85d77f..8e07fa2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -19,7 +19,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -63,7 +63,7 @@
// ThreadPoolSize=2, 1 for scheduler and the other for the scanner.
private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 2;
private final DeletedBlockLog deletedBlockLog;
- private final Mapping mappingService;
+ private final ContainerManager containerManager;
private final NodeManager nodeManager;
private final EventPublisher eventPublisher;
@@ -81,12 +81,13 @@
private int blockDeleteLimitSize;
public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
- Mapping mapper, NodeManager nodeManager, EventPublisher eventPublisher,
- long interval, long serviceTimeout, Configuration conf) {
+ ContainerManager containerManager, NodeManager nodeManager,
+ EventPublisher eventPublisher, long interval, long serviceTimeout,
+ Configuration conf) {
super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.deletedBlockLog = deletedBlockLog;
- this.mappingService = mapper;
+ this.containerManager = containerManager;
this.nodeManager = nodeManager;
this.eventPublisher = eventPublisher;
@@ -139,7 +140,7 @@
List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
Map<Long, Long> transactionMap = null;
if (datanodes != null) {
- transactions = new DatanodeDeletedBlockTransactions(mappingService,
+ transactions = new DatanodeDeletedBlockTransactions(containerManager,
blockDeleteLimitSize, datanodes.size());
try {
transactionMap = deletedBlockLog.getTransactions(transactions);
@@ -174,7 +175,7 @@
transactions.getTransactionIDList(dnId)));
}
}
- mappingService.updateDeleteTransactionId(transactionMap);
+ containerManager.updateDeleteTransactionId(transactionMap);
}
if (dnTxCount > 0) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 7baecc4..8be7803 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -46,9 +46,9 @@
LoggerFactory.getLogger(CloseContainerEventHandler.class);
- private final Mapping containerManager;
+ private final ContainerManager containerManager;
- public CloseContainerEventHandler(Mapping containerManager) {
+ public CloseContainerEventHandler(ContainerManager containerManager) {
this.containerManager = containerManager;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java
index 8e277b9..7b94bd2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerWatcher.java
@@ -44,11 +44,11 @@
public static final Logger LOG =
LoggerFactory.getLogger(CloseContainerWatcher.class);
- private final Mapping containerManager;
+ private final ContainerManager containerManager;
public CloseContainerWatcher(Event<CloseContainerRetryableReq> startEvent,
Event<CloseContainerStatus> completionEvent,
- LeaseManager<Long> leaseManager, Mapping containerManager) {
+ LeaseManager<Long> leaseManager, ContainerManager containerManager) {
super(startEvent, completionEvent, leaseManager);
this.containerManager = containerManager;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
similarity index 95%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 5ed80cb..e586f3e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -33,10 +33,11 @@
import java.util.Map;
/**
- * Mapping class contains the mapping from a name to a pipeline mapping. This is
- * used by SCM when allocating new locations and when looking up a key.
+ * ContainerManager class maintains the mapping from container names to
+ * pipelines. This is used by SCM when allocating new locations and when
+ * looking up a key.
*/
-public interface Mapping extends Closeable {
+public interface ContainerManager extends Closeable {
/**
* Returns the ContainerInfo from the container ID.
*
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 71935f0..0f824a0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -50,21 +50,21 @@
private final NodeManager nodeManager;
- private final Mapping containerMapping;
+ private final ContainerManager containerManager;
private ContainerStateManager containerStateManager;
private ReplicationActivityStatus replicationStatus;
- public ContainerReportHandler(Mapping containerMapping,
+ public ContainerReportHandler(ContainerManager containerManager,
NodeManager nodeManager,
ReplicationActivityStatus replicationActivityStatus) {
- Preconditions.checkNotNull(containerMapping);
+ Preconditions.checkNotNull(containerManager);
Preconditions.checkNotNull(nodeManager);
Preconditions.checkNotNull(replicationActivityStatus);
- this.containerStateManager = containerMapping.getStateManager();
+ this.containerStateManager = containerManager.getStateManager();
this.nodeManager = nodeManager;
- this.containerMapping = containerMapping;
+ this.containerManager = containerManager;
this.replicationStatus = replicationActivityStatus;
}
@@ -80,7 +80,7 @@
try {
//update state in container db and trigger close container events
- containerMapping
+ containerManager
.processContainerReports(datanodeOrigin, containerReport, false);
Set<ContainerID> containerIds = containerReport.getReportsList().stream()
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 930c098..b8e4e89 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -136,7 +136,7 @@
*/
@SuppressWarnings("unchecked")
public ContainerStateManager(Configuration configuration,
- Mapping containerMapping, PipelineSelector pipelineSelector) {
+ ContainerManager containerManager, PipelineSelector pipelineSelector) {
// Initialize the container state machine.
Set<HddsProtos.LifeCycleState> finalStates = new HashSet();
@@ -158,15 +158,15 @@
lastUsedMap = new ConcurrentHashMap<>();
containerCount = new AtomicLong(0);
containers = new ContainerStateMap();
- loadExistingContainers(containerMapping, pipelineSelector);
+ loadExistingContainers(containerManager, pipelineSelector);
}
- private void loadExistingContainers(Mapping containerMapping,
+ private void loadExistingContainers(ContainerManager containerManager,
PipelineSelector pipelineSelector) {
List<ContainerInfo> containerList;
try {
- containerList = containerMapping.listContainer(0, Integer.MAX_VALUE);
+ containerList = containerManager.listContainer(0, Integer.MAX_VALUE);
// if there are no container to load, let us return.
if (containerList == null || containerList.size() == 0) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
similarity index 98%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 71e17e9..df26e36 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -74,12 +74,12 @@
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
/**
- * Mapping class contains the mapping from a name to a pipeline mapping. This
- * is used by SCM when
- * allocating new locations and when looking up a key.
+ * ContainerManager class maintains the mapping from container names to
+ * pipelines. This is used by SCM when allocating new locations and when
+ * looking up a key.
*/
-public class ContainerMapping implements Mapping {
- private static final Logger LOG = LoggerFactory.getLogger(ContainerMapping
+public class SCMContainerManager implements ContainerManager {
+ private static final Logger LOG = LoggerFactory.getLogger(SCMContainerManager
.class);
private final NodeManager nodeManager;
@@ -108,7 +108,7 @@
* @throws IOException on Failure.
*/
@SuppressWarnings("unchecked")
- public ContainerMapping(
+ public SCMContainerManager(
final Configuration conf, final NodeManager nodeManager, final int
cacheSizeMB, EventPublisher eventPublisher) throws IOException {
this.nodeManager = nodeManager;
@@ -653,7 +653,7 @@
/**
* Since allocatedBytes of a container is only in memory, stored in
- * containerStateManager, when closing ContainerMapping, we need to update
+ * containerStateManager, when closing SCMContainerManager, we need to update
* this in the container store.
*
* @throws IOException on failure.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 66136f1..4eedf1f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -160,7 +160,7 @@
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
- return scm.getScmContainerManager()
+ return scm.getContainerManager()
.allocateContainer(replicationType, factor, owner);
}
@@ -168,7 +168,7 @@
public ContainerInfo getContainer(long containerID) throws IOException {
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
- return scm.getScmContainerManager()
+ return scm.getContainerManager()
.getContainer(containerID);
}
@@ -176,7 +176,7 @@
public ContainerWithPipeline getContainerWithPipeline(long containerID)
throws IOException {
if (chillModePrecheck.isInChillMode()) {
- ContainerInfo contInfo = scm.getScmContainerManager()
+ ContainerInfo contInfo = scm.getContainerManager()
.getContainer(containerID);
if (contInfo.isContainerOpen()) {
if (!hasRequiredReplicas(contInfo)) {
@@ -188,7 +188,7 @@
}
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
- return scm.getScmContainerManager()
+ return scm.getContainerManager()
.getContainerWithPipeline(containerID);
}
@@ -198,7 +198,7 @@
*/
private boolean hasRequiredReplicas(ContainerInfo contInfo) {
try{
- return getScm().getScmContainerManager().getStateManager()
+ return getScm().getContainerManager().getStateManager()
.getContainerReplicas(contInfo.containerID())
.size() >= contInfo.getReplicationFactor().getNumber();
} catch (SCMException ex) {
@@ -211,7 +211,7 @@
@Override
public List<ContainerInfo> listContainer(long startContainerID,
int count) throws IOException {
- return scm.getScmContainerManager().
+ return scm.getContainerManager().
listContainer(startContainerID, count);
}
@@ -219,7 +219,7 @@
public void deleteContainer(long containerID) throws IOException {
String remoteUser = getRpcRemoteUsername();
getScm().checkAdminAccess(remoteUser);
- scm.getScmContainerManager().deleteContainer(containerID);
+ scm.getContainerManager().deleteContainer(containerID);
}
@@ -257,10 +257,10 @@
.ObjectStageChangeRequestProto.Op.create) {
if (stage == StorageContainerLocationProtocolProtos
.ObjectStageChangeRequestProto.Stage.begin) {
- scm.getScmContainerManager().updateContainerState(id, HddsProtos
+ scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.CREATE);
} else {
- scm.getScmContainerManager().updateContainerState(id, HddsProtos
+ scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.CREATED);
}
} else {
@@ -268,10 +268,10 @@
.ObjectStageChangeRequestProto.Op.close) {
if (stage == StorageContainerLocationProtocolProtos
.ObjectStageChangeRequestProto.Stage.begin) {
- scm.getScmContainerManager().updateContainerState(id, HddsProtos
+ scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.FINALIZE);
} else {
- scm.getScmContainerManager().updateContainerState(id, HddsProtos
+ scm.getContainerManager().updateContainerState(id, HddsProtos
.LifeCycleEvent.CLOSE);
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 9c6fa88..2c96856 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -196,7 +196,7 @@
.register(datanodeDetails, nodeReport, pipelineReportsProto);
if (registeredCommand.getError()
== SCMRegisteredResponseProto.ErrorCode.success) {
- scm.getScmContainerManager().processContainerReports(datanodeDetails,
+ scm.getContainerManager().processContainerReports(datanodeDetails,
containerReportsProto, true);
eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
new NodeRegistrationContainerReport(datanodeDetails,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index bdafe0c..efd5fc5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -40,9 +40,9 @@
import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
-import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.replication
.ReplicationActivityStatus;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
@@ -151,7 +151,7 @@
* State Managers of SCM.
*/
private final NodeManager scmNodeManager;
- private final Mapping scmContainerManager;
+ private final ContainerManager containerManager;
private final BlockManager scmBlockManager;
private final SCMStorage scmStorage;
@@ -206,43 +206,43 @@
scmNodeManager = new SCMNodeManager(
conf, scmStorage.getClusterID(), this, eventQueue);
- scmContainerManager = new ContainerMapping(
+ containerManager = new SCMContainerManager(
conf, getScmNodeManager(), cacheSize, eventQueue);
scmBlockManager = new BlockManagerImpl(
- conf, getScmNodeManager(), scmContainerManager, eventQueue);
+ conf, getScmNodeManager(), containerManager, eventQueue);
replicationStatus = new ReplicationActivityStatus();
CloseContainerEventHandler closeContainerHandler =
- new CloseContainerEventHandler(scmContainerManager);
+ new CloseContainerEventHandler(containerManager);
NodeReportHandler nodeReportHandler =
new NodeReportHandler(scmNodeManager);
PipelineReportHandler pipelineReportHandler =
new PipelineReportHandler(
- scmContainerManager.getPipelineSelector());
+ containerManager.getPipelineSelector());
CommandStatusReportHandler cmdStatusReportHandler =
new CommandStatusReportHandler();
NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager);
StaleNodeHandler staleNodeHandler =
- new StaleNodeHandler(scmContainerManager.getPipelineSelector());
+ new StaleNodeHandler(containerManager.getPipelineSelector());
DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
- getScmContainerManager().getStateManager());
+ getContainerManager().getStateManager());
ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
PendingDeleteHandler pendingDeleteHandler =
new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
ContainerReportHandler containerReportHandler =
- new ContainerReportHandler(scmContainerManager, scmNodeManager,
+ new ContainerReportHandler(containerManager, scmNodeManager,
replicationStatus);
scmChillModeManager = new SCMChillModeManager(conf,
- getScmContainerManager().getStateManager().getAllContainers(),
+ getContainerManager().getStateManager().getAllContainers(),
eventQueue);
PipelineActionEventHandler pipelineActionEventHandler =
new PipelineActionEventHandler();
PipelineCloseHandler pipelineCloseHandler =
- new PipelineCloseHandler(scmContainerManager.getPipelineSelector());
+ new PipelineCloseHandler(containerManager.getPipelineSelector());
long watcherTimeout =
conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
@@ -263,14 +263,14 @@
new SCMContainerPlacementCapacity(scmNodeManager, conf);
replicationManager = new ReplicationManager(containerPlacementPolicy,
- scmContainerManager.getStateManager(), eventQueue,
+ containerManager.getStateManager(), eventQueue,
commandWatcherLeaseManager);
// setup CloseContainer watcher
CloseContainerWatcher closeContainerWatcher =
new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
SCMEvents.CLOSE_CONTAINER_STATUS, commandWatcherLeaseManager,
- scmContainerManager);
+ containerManager);
closeContainerWatcher.start(eventQueue);
scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
@@ -632,7 +632,7 @@
@VisibleForTesting
public ContainerInfo getContainerInfo(long containerID) throws
IOException {
- return scmContainerManager.getContainer(containerID);
+ return containerManager.getContainer(containerID);
}
/**
@@ -774,7 +774,7 @@
} catch (Exception ex) {
LOG.error("SCM Event Queue stop failed", ex);
}
- IOUtils.cleanupWithLogger(LOG, scmContainerManager);
+ IOUtils.cleanupWithLogger(LOG, containerManager);
}
/**
@@ -805,8 +805,8 @@
* Returns SCM container manager.
*/
@VisibleForTesting
- public Mapping getScmContainerManager() {
- return scmContainerManager;
+ public ContainerManager getContainerManager() {
+ return containerManager;
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index e70e444..25f6ae3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -56,7 +56,7 @@
* Tests for SCM Block Manager.
*/
public class TestBlockManager implements EventHandler<Boolean> {
- private static ContainerMapping mapping;
+ private static SCMContainerManager mapping;
private static MockNodeManager nodeManager;
private static BlockManagerImpl blockManager;
private static File testDir;
@@ -83,7 +83,7 @@
throw new IOException("Unable to create test directory path");
}
nodeManager = new MockNodeManager(true, 10);
- mapping = new ContainerMapping(conf, nodeManager, 128, eventQueue);
+ mapping = new SCMContainerManager(conf, nodeManager, 128, eventQueue);
blockManager = new BlockManagerImpl(conf,
nodeManager, mapping, eventQueue);
eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 9f0e336..0812027 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -19,8 +19,8 @@
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -72,7 +72,7 @@
private static DeletedBlockLogImpl deletedBlockLog;
private OzoneConfiguration conf;
private File testDir;
- private Mapping containerManager;
+ private ContainerManager containerManager;
private List<DatanodeDetails> dnList;
@Before
@@ -82,7 +82,7 @@
conf = new OzoneConfiguration();
conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
- containerManager = Mockito.mock(ContainerMapping.class);
+ containerManager = Mockito.mock(SCMContainerManager.class);
deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
dnList = new ArrayList<>(3);
setupContainerManager();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 38050c9..3917d39 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -50,7 +50,7 @@
private static Configuration configuration;
private static MockNodeManager nodeManager;
- private static ContainerMapping mapping;
+ private static SCMContainerManager mapping;
private static long size;
private static File testDir;
private static EventQueue eventQueue;
@@ -65,7 +65,7 @@
configuration
.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
nodeManager = new MockNodeManager(true, 10);
- mapping = new ContainerMapping(configuration, nodeManager, 128,
+ mapping = new SCMContainerManager(configuration, nodeManager, 128,
new EventQueue());
eventQueue = new EventQueue();
eventQueue.addHandler(CLOSE_CONTAINER,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index f79ae1e..7f32be5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -74,10 +74,10 @@
public void test() throws IOException {
//GIVEN
OzoneConfiguration conf = new OzoneConfiguration();
- Mapping mapping = Mockito.mock(Mapping.class);
+ ContainerManager containerManager = Mockito.mock(ContainerManager.class);
PipelineSelector selector = Mockito.mock(PipelineSelector.class);
- when(mapping.getContainer(anyLong()))
+ when(containerManager.getContainer(anyLong()))
.thenAnswer(
(Answer<ContainerInfo>) invocation ->
new Builder()
@@ -88,15 +88,15 @@
);
ContainerStateManager containerStateManager =
- new ContainerStateManager(conf, mapping, selector);
+ new ContainerStateManager(conf, containerManager, selector);
- when(mapping.getStateManager()).thenReturn(containerStateManager);
+ when(containerManager.getStateManager()).thenReturn(containerStateManager);
ReplicationActivityStatus replicationActivityStatus =
new ReplicationActivityStatus();
ContainerReportHandler reportHandler =
- new ContainerReportHandler(mapping, nodeManager,
+ new ContainerReportHandler(containerManager, nodeManager,
replicationActivityStatus);
DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index b857740..785753b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -41,7 +41,7 @@
@Before
public void init() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
- Mapping mapping = Mockito.mock(Mapping.class);
+ ContainerManager mapping = Mockito.mock(ContainerManager.class);
PipelineSelector selector = Mockito.mock(PipelineSelector.class);
containerStateManager = new ContainerStateManager(conf, mapping, selector);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
similarity index 78%
rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index f9a881e..b067ac9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -58,10 +58,10 @@
import java.util.concurrent.TimeUnit;
/**
- * Tests for Container Mapping.
+ * Tests for SCMContainerManager.
*/
-public class TestContainerMapping {
- private static ContainerMapping mapping;
+public class TestSCMContainerManager {
+ private static SCMContainerManager containerManager;
private static MockNodeManager nodeManager;
private static File testDir;
private static XceiverClientManager xceiverClientManager;
@@ -77,7 +77,7 @@
Configuration conf = SCMTestUtils.getConf();
testDir = GenericTestUtils
- .getTestDir(TestContainerMapping.class.getSimpleName());
+ .getTestDir(TestSCMContainerManager.class.getSimpleName());
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
testDir.getAbsolutePath());
conf.setTimeDuration(
@@ -89,7 +89,7 @@
throw new IOException("Unable to create test directory path");
}
nodeManager = new MockNodeManager(true, 10);
- mapping = new ContainerMapping(conf, nodeManager, 128,
+ containerManager = new SCMContainerManager(conf, nodeManager, 128,
new EventQueue());
xceiverClientManager = new XceiverClientManager(conf);
random = new Random();
@@ -97,8 +97,8 @@
@AfterClass
public static void cleanup() throws IOException {
- if(mapping != null) {
- mapping.close();
+ if(containerManager != null) {
+ containerManager.close();
}
FileUtil.fullyDelete(testDir);
}
@@ -110,7 +110,7 @@
@Test
public void testallocateContainer() throws Exception {
- ContainerWithPipeline containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@@ -127,7 +127,7 @@
*/
Set<UUID> pipelineList = new TreeSet<>();
for (int x = 0; x < 30; x++) {
- ContainerWithPipeline containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@@ -142,7 +142,7 @@
@Test
public void testGetContainer() throws IOException {
- ContainerWithPipeline containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@@ -155,10 +155,9 @@
@Test
public void testGetContainerWithPipeline() throws Exception {
- ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(),
- containerOwner);
+ ContainerWithPipeline containerWithPipeline = containerManager
+ .allocateContainer(xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
ContainerInfo contInfo = containerWithPipeline.getContainerInfo();
// Add dummy replicas for container.
DatanodeDetails dn1 = DatanodeDetails.newBuilder()
@@ -169,28 +168,28 @@
.setHostName("host2")
.setIpAddress("2.2.2.2")
.setUuid(UUID.randomUUID().toString()).build();
- mapping
+ containerManager
.updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CREATE);
- mapping.updateContainerState(contInfo.getContainerID(),
+ containerManager.updateContainerState(contInfo.getContainerID(),
LifeCycleEvent.CREATED);
- mapping.updateContainerState(contInfo.getContainerID(),
+ containerManager.updateContainerState(contInfo.getContainerID(),
LifeCycleEvent.FINALIZE);
- mapping
+ containerManager
.updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CLOSE);
ContainerInfo finalContInfo = contInfo;
LambdaTestUtils.intercept(SCMException.class, "No entry exist for "
- + "containerId:", () -> mapping.getContainerWithPipeline(
+ + "containerId:", () -> containerManager.getContainerWithPipeline(
finalContInfo.getContainerID()));
- mapping.getStateManager().getContainerStateMap()
+ containerManager.getStateManager().getContainerStateMap()
.addContainerReplica(contInfo.containerID(), dn1, dn2);
- contInfo = mapping.getContainer(contInfo.getContainerID());
+ contInfo = containerManager.getContainer(contInfo.getContainerID());
Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED);
Pipeline pipeline = containerWithPipeline.getPipeline();
- mapping.getPipelineSelector().finalizePipeline(pipeline);
+ containerManager.getPipelineSelector().finalizePipeline(pipeline);
- ContainerWithPipeline containerWithPipeline2 = mapping
+ ContainerWithPipeline containerWithPipeline2 = containerManager
.getContainerWithPipeline(contInfo.getContainerID());
pipeline = containerWithPipeline2.getPipeline();
Assert.assertNotEquals(containerWithPipeline, containerWithPipeline2);
@@ -202,24 +201,23 @@
@Test
public void testgetNoneExistentContainer() throws IOException {
thrown.expectMessage("Specified key does not exist.");
- mapping.getContainer(random.nextLong());
+ containerManager.getContainer(random.nextLong());
}
@Test
public void testContainerCreationLeaseTimeout() throws IOException,
InterruptedException {
nodeManager.setChillmode(false);
- ContainerWithPipeline containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
- mapping.updateContainerState(containerInfo.getContainerInfo()
+ containerManager.updateContainerState(containerInfo.getContainerInfo()
.getContainerID(), HddsProtos.LifeCycleEvent.CREATE);
Thread.sleep(TIMEOUT + 1000);
- NavigableSet<ContainerID> deleteContainers = mapping.getStateManager()
- .getMatchingContainerIDs(
- "OZONE",
+ NavigableSet<ContainerID> deleteContainers = containerManager
+ .getStateManager().getMatchingContainerIDs("OZONE",
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.DELETING);
@@ -228,7 +226,7 @@
thrown.expect(IOException.class);
thrown.expectMessage("Lease Exception");
- mapping
+ containerManager
.updateContainerState(containerInfo.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
}
@@ -258,25 +256,27 @@
.newBuilder();
crBuilder.addAllReports(reports);
- mapping.processContainerReports(datanodeDetails, crBuilder.build(), false);
+ containerManager.processContainerReports(
+ datanodeDetails, crBuilder.build(), false);
ContainerInfo updatedContainer =
- mapping.getContainer(info.getContainerID());
+ containerManager.getContainer(info.getContainerID());
Assert.assertEquals(100000000L,
updatedContainer.getNumberOfKeys());
Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
LambdaTestUtils.intercept(SCMException.class, "No entry "
- + "exist for containerId:", () -> mapping.getStateManager()
+ + "exist for containerId:", () -> containerManager.getStateManager()
.getContainerReplicas(ContainerID.valueof(c.getContainerID())));
}
- mapping.processContainerReports(TestUtils.randomDatanodeDetails(),
+ containerManager.processContainerReports(TestUtils.randomDatanodeDetails(),
crBuilder.build(), true);
for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
- Assert.assertTrue(mapping.getStateManager().getContainerReplicas(
- ContainerID.valueof(c.getContainerID())).size() > 0);
+ Assert.assertTrue(containerManager.getStateManager()
+ .getContainerReplicas(
+ ContainerID.valueof(c.getContainerID())).size() > 0);
}
}
@@ -313,9 +313,10 @@
.newBuilder();
crBuilder.addAllReports(reports);
- mapping.processContainerReports(datanodeDetails, crBuilder.build(), false);
+ containerManager.processContainerReports(
+ datanodeDetails, crBuilder.build(), false);
- List<ContainerInfo> list = mapping.listContainer(0, 50);
+ List<ContainerInfo> list = containerManager.listContainer(0, 50);
Assert.assertEquals(2, list.stream().filter(
x -> x.getContainerID() == cID1 || x.getContainerID() == cID2).count());
Assert.assertEquals(300000000L, list.stream().filter(
@@ -329,20 +330,18 @@
@Test
public void testCloseContainer() throws IOException {
ContainerInfo info = createContainer();
- mapping.updateContainerState(info.getContainerID(),
+ containerManager.updateContainerState(info.getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
- NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
- .getMatchingContainerIDs(
- containerOwner,
+ NavigableSet<ContainerID> pendingCloseContainers = containerManager
+ .getStateManager().getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSING);
Assert.assertTrue(pendingCloseContainers.contains(info.containerID()));
- mapping.updateContainerState(info.getContainerID(),
+ containerManager.updateContainerState(info.getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
- NavigableSet<ContainerID> closeContainers = mapping.getStateManager()
- .getMatchingContainerIDs(
- containerOwner,
+ NavigableSet<ContainerID> closeContainers = containerManager
+ .getStateManager().getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CLOSED);
@@ -350,20 +349,19 @@
}
/**
- * Creates a container with the given name in ContainerMapping.
+ * Creates a container with the given name in SCMContainerManager.
* @throws IOException
*/
private ContainerInfo createContainer()
throws IOException {
nodeManager.setChillmode(false);
- ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(),
- containerOwner);
+ ContainerWithPipeline containerWithPipeline = containerManager
+ .allocateContainer(xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
- mapping.updateContainerState(containerInfo.getContainerID(),
+ containerManager.updateContainerState(containerInfo.getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- mapping.updateContainerState(containerInfo.getContainerID(),
+ containerManager.updateContainerState(containerInfo.getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
return containerInfo;
}
@@ -371,10 +369,10 @@
@Test
public void testFlushAllContainers() throws IOException {
ContainerInfo info = createContainer();
- List<ContainerInfo> containers = mapping.getStateManager()
+ List<ContainerInfo> containers = containerManager.getStateManager()
.getAllContainers();
Assert.assertTrue(containers.size() > 0);
- mapping.flushContainerInfo();
+ containerManager.flushContainerInfo();
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index cd79d44..175180a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
@@ -97,12 +97,13 @@
return nodeManager;
}
- ContainerMapping createContainerManager(Configuration config,
+ SCMContainerManager createContainerManager(Configuration config,
NodeManager scmNodeManager) throws IOException {
EventQueue eventQueue = new EventQueue();
final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
- return new ContainerMapping(config, scmNodeManager, cacheSize, eventQueue);
+ return new SCMContainerManager(config, scmNodeManager, cacheSize,
+ eventQueue);
}
@@ -131,7 +132,7 @@
SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
SCMNodeManager nodeManager = createNodeManager(conf);
- ContainerMapping containerManager =
+ SCMContainerManager containerManager =
createContainerManager(conf, nodeManager);
List<DatanodeDetails> datanodes =
TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 7bba032..9d6927d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -31,8 +31,8 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
-import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
@@ -69,7 +69,7 @@
public void setup() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
containerStateManager = new ContainerStateManager(conf,
- Mockito.mock(Mapping.class),
+ Mockito.mock(ContainerManager.class),
Mockito.mock(PipelineSelector.class));
eventQueue = new EventQueue();
nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
index 56c3830..27195a1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
@@ -27,7 +27,7 @@
.CloseContainerRetryableReq;
import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -61,8 +61,8 @@
private static EventWatcher<CloseContainerRetryableReq, CloseContainerStatus>
watcher;
private static LeaseManager<Long> leaseManager;
- private static ContainerMapping containerMapping = Mockito
- .mock(ContainerMapping.class);
+ private static SCMContainerManager containerManager = Mockito
+ .mock(SCMContainerManager.class);
private static EventQueue queue;
@Rule
public Timeout timeout = new Timeout(1000*15);
@@ -230,7 +230,7 @@
time);
leaseManager.start();
watcher = new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
- SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerMapping);
+ SCMEvents.CLOSE_CONTAINER_STATUS, leaseManager, containerManager);
queue = new EventQueue();
watcher.start(queue);
}
@@ -274,8 +274,8 @@
throws IOException {
ContainerInfo containerInfo = Mockito.mock(ContainerInfo.class);
ContainerInfo containerInfo2 = Mockito.mock(ContainerInfo.class);
- when(containerMapping.getContainer(id1)).thenReturn(containerInfo);
- when(containerMapping.getContainer(id2)).thenReturn(containerInfo2);
+ when(containerManager.getContainer(id1)).thenReturn(containerInfo);
+ when(containerManager.getContainer(id2)).thenReturn(containerInfo2);
when(containerInfo.isContainerOpen()).thenReturn(true);
when(containerInfo2.isContainerOpen()).thenReturn(isOpen);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index a8f7e01..ed8b1e3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -55,7 +55,7 @@
private MiniOzoneCluster cluster;
private XceiverClientManager xceiverClientManager;
private StorageContainerManager scm;
- private ContainerMapping scmContainerMapping;
+ private ContainerManager containerManager;
private ContainerStateManager containerStateManager;
private PipelineSelector selector;
private String containerOwner = "OZONE";
@@ -69,9 +69,9 @@
cluster.waitTobeOutOfChillMode();
xceiverClientManager = new XceiverClientManager(conf);
scm = cluster.getStorageContainerManager();
- scmContainerMapping = (ContainerMapping) scm.getScmContainerManager();
- containerStateManager = scmContainerMapping.getStateManager();
- selector = scmContainerMapping.getPipelineSelector();
+ containerManager = scm.getContainerManager();
+ containerStateManager = containerManager.getStateManager();
+ selector = containerManager.getPipelineSelector();
}
@After
@@ -128,7 +128,7 @@
xceiverClientManager.getFactor(), containerOwner);
containers.add(container.getContainerInfo());
if (i >= 5) {
- scm.getScmContainerManager().updateContainerState(container
+ scm.getContainerManager().updateContainerState(container
.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
}
@@ -137,7 +137,7 @@
// New instance of ContainerStateManager should load all the containers in
// container store.
ContainerStateManager stateManager =
- new ContainerStateManager(conf, scmContainerMapping, selector);
+ new ContainerStateManager(conf, containerManager, selector);
int matchCount = stateManager
.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -154,10 +154,10 @@
ContainerWithPipeline container1 = scm.getClientProtocolServer().
allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
@@ -179,10 +179,10 @@
Assert.assertEquals(container2.getContainerInfo().getContainerID(),
info.getContainerID());
- scmContainerMapping
+ containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping
+ containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
@@ -216,7 +216,7 @@
HddsProtos.LifeCycleState.ALLOCATED).size();
Assert.assertEquals(1, containers);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -224,7 +224,7 @@
HddsProtos.LifeCycleState.CREATING).size();
Assert.assertEquals(1, containers);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -232,7 +232,7 @@
HddsProtos.LifeCycleState.OPEN).size();
Assert.assertEquals(1, containers);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -240,7 +240,7 @@
HddsProtos.LifeCycleState.CLOSING).size();
Assert.assertEquals(1, containers);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -248,7 +248,7 @@
HddsProtos.LifeCycleState.CLOSED).size();
Assert.assertEquals(1, containers);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.DELETE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -256,7 +256,7 @@
HddsProtos.LifeCycleState.DELETING).size();
Assert.assertEquals(1, containers);
- scmContainerMapping
+ containerManager
.updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLEANUP);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -270,10 +270,10 @@
.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping
+ containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping
+ containerManager
.updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.TIMEOUT);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -287,16 +287,16 @@
.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping
+ containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping
+ containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
- scmContainerMapping
+ containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
- scmContainerMapping
+ containerManager
.updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
@@ -310,10 +310,10 @@
ContainerWithPipeline container1 = scm.getClientProtocolServer()
.allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping.updateContainerState(container1
+ containerManager.updateContainerState(container1
.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping.updateContainerState(container1
+ containerManager.updateContainerState(container1
.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
@@ -330,8 +330,8 @@
Assert.assertEquals(container1.getContainerInfo().getContainerID(),
info.getContainerID());
- ContainerMapping containerMapping =
- (ContainerMapping) scmContainerMapping;
+ SCMContainerManager containerMapping =
+ (SCMContainerManager) containerManager;
// manually trigger a flush, this will persist the allocated bytes value
// to disk
containerMapping.flushContainerInfo();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index e61a909..c0a6989 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
@@ -51,7 +51,7 @@
private static StorageContainerManager scm;
private static ContainerWithPipeline ratisContainer;
private static ContainerStateMap stateMap;
- private static ContainerMapping mapping;
+ private static ContainerManager containerManager;
private static PipelineSelector pipelineSelector;
/**
@@ -65,10 +65,11 @@
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager();
- mapping = (ContainerMapping)scm.getScmContainerManager();
- stateMap = mapping.getStateManager().getContainerStateMap();
- ratisContainer = mapping.allocateContainer(RATIS, THREE, "testOwner");
- pipelineSelector = mapping.getPipelineSelector();
+ containerManager = scm.getContainerManager();
+ stateMap = containerManager.getStateManager().getContainerStateMap();
+ ratisContainer = containerManager.allocateContainer(
+ RATIS, THREE, "testOwner");
+ pipelineSelector = containerManager.getPipelineSelector();
}
/**
@@ -106,13 +107,13 @@
// Now close the container and it should not show up while fetching
// containers by pipeline
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATE);
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATED);
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
Set<ContainerID> set2 = pipelineSelector.getOpenContainerIDsByPipeline(
ratisContainer.getPipeline().getId());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
index 7e3969c..f3e1ece 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -52,7 +52,7 @@
private static OzoneConfiguration conf;
private static ContainerWithPipeline ratisContainer1;
private static ContainerWithPipeline ratisContainer2;
- private static ContainerMapping mapping;
+ private static ContainerManager containerManager;
private static long timeForFailure;
/**
@@ -75,9 +75,11 @@
.build();
cluster.waitForClusterToBeReady();
StorageContainerManager scm = cluster.getStorageContainerManager();
- mapping = (ContainerMapping)scm.getScmContainerManager();
- ratisContainer1 = mapping.allocateContainer(RATIS, THREE, "testOwner");
- ratisContainer2 = mapping.allocateContainer(RATIS, THREE, "testOwner");
+ containerManager = scm.getContainerManager();
+ ratisContainer1 = containerManager.allocateContainer(
+ RATIS, THREE, "testOwner");
+ ratisContainer2 = containerManager.allocateContainer(
+ RATIS, THREE, "testOwner");
// At this stage, there should be 2 pipeline one with 1 open container each.
// Try closing the both the pipelines, one with a closed container and
// the other with an open container.
@@ -113,12 +115,12 @@
ratisContainer1.getPipeline().getLifeCycleState());
Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
ratisContainer2.getPipeline().getLifeCycleState());
- Assert.assertNull(
- mapping.getPipelineSelector().getPipeline(pipelineToFail.getId()));
+ Assert.assertNull(containerManager.getPipelineSelector()
+ .getPipeline(pipelineToFail.getId()));
// Now restart the datanode and make sure that a new pipeline is created.
cluster.restartHddsDatanode(dnToFail);
ContainerWithPipeline ratisContainer3 =
- mapping.allocateContainer(RATIS, THREE, "testOwner");
+ containerManager.allocateContainer(RATIS, THREE, "testOwner");
//Assert that new container is not created from the ratis 2 pipeline
Assert.assertNotEquals(ratisContainer3.getPipeline().getId(),
ratisContainer2.getPipeline().getId());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index b02eae2..7e6d5b4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers
.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -51,7 +51,7 @@
private static ContainerWithPipeline ratisContainer1;
private static ContainerWithPipeline ratisContainer2;
private static ContainerStateMap stateMap;
- private static ContainerMapping mapping;
+ private static ContainerManager containerManager;
private static PipelineSelector pipelineSelector;
/**
@@ -65,11 +65,13 @@
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(6).build();
cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager();
- mapping = (ContainerMapping)scm.getScmContainerManager();
- stateMap = mapping.getStateManager().getContainerStateMap();
- ratisContainer1 = mapping.allocateContainer(RATIS, THREE, "testOwner");
- ratisContainer2 = mapping.allocateContainer(RATIS, THREE, "testOwner");
- pipelineSelector = mapping.getPipelineSelector();
+ containerManager = scm.getContainerManager();
+ stateMap = containerManager.getStateManager().getContainerStateMap();
+ ratisContainer1 = containerManager
+ .allocateContainer(RATIS, THREE, "testOwner");
+ ratisContainer2 = containerManager
+ .allocateContainer(RATIS, THREE, "testOwner");
+ pipelineSelector = containerManager.getPipelineSelector();
// At this stage, there should be 2 pipeline one with 1 open container each.
// Try closing the both the pipelines, one with a closed container and
// the other with an open container.
@@ -98,13 +100,13 @@
// Now close the container and it should not show up while fetching
// containers by pipeline
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATE);
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CREATED);
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
- mapping
+ containerManager
.updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
Set<ContainerID> setClosed = pipelineSelector.getOpenContainerIDsByPipeline(
@@ -132,9 +134,9 @@
Assert.assertEquals(1, setOpen.size());
long cId2 = ratisContainer2.getContainerInfo().getContainerID();
- mapping
+ containerManager
.updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATE);
- mapping
+ containerManager
.updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATED);
pipelineSelector.finalizePipeline(ratisContainer2.getPipeline());
Assert.assertEquals(ratisContainer2.getPipeline().getLifeCycleState(),
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index fb94b3c..bac4022 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -46,8 +46,8 @@
private static OzoneConfiguration conf;
private static Pipeline ratisPipeline1;
private static Pipeline ratisPipeline2;
- private static ContainerMapping mapping;
- private static ContainerMapping newMapping;
+ private static ContainerManager containerManager;
+ private static ContainerManager newContainerManager;
/**
* Create a MiniDFSCluster for testing.
@@ -64,17 +64,17 @@
.build();
cluster.waitForClusterToBeReady();
StorageContainerManager scm = cluster.getStorageContainerManager();
- mapping = (ContainerMapping)scm.getScmContainerManager();
- ratisPipeline1 =
- mapping.allocateContainer(RATIS, THREE, "Owner1").getPipeline();
- ratisPipeline2 =
- mapping.allocateContainer(RATIS, ONE, "Owner2").getPipeline();
+ containerManager = scm.getContainerManager();
+ ratisPipeline1 = containerManager.allocateContainer(
+ RATIS, THREE, "Owner1").getPipeline();
+ ratisPipeline2 = containerManager.allocateContainer(
+ RATIS, ONE, "Owner2").getPipeline();
// At this stage, there should be 2 pipeline one with 1 open container
// each. Try restarting the SCM and then discover that pipeline are in
// correct state.
cluster.restartStorageContainerManager();
- newMapping = (ContainerMapping)(cluster.getStorageContainerManager()
- .getScmContainerManager());
+ newContainerManager = cluster.getStorageContainerManager()
+ .getContainerManager();
}
/**
@@ -90,10 +90,10 @@
@Test
public void testPipelineWithScmRestart() throws IOException {
// After restart make sure that the pipeline are still present
- Pipeline ratisPipeline1AfterRestart = newMapping.getPipelineSelector()
- .getPipeline(ratisPipeline1.getId());
- Pipeline ratisPipeline2AfterRestart = newMapping.getPipelineSelector()
- .getPipeline(ratisPipeline2.getId());
+ Pipeline ratisPipeline1AfterRestart = newContainerManager
+ .getPipelineSelector().getPipeline(ratisPipeline1.getId());
+ Pipeline ratisPipeline2AfterRestart = newContainerManager
+ .getPipelineSelector().getPipeline(ratisPipeline2.getId());
Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
@@ -111,9 +111,8 @@
// Try creating a new ratis pipeline, it should be from the same pipeline
// as was before restart
- Pipeline newRatisPipeline =
- newMapping.allocateContainer(RATIS, THREE, "Owner1")
- .getPipeline();
+ Pipeline newRatisPipeline = newContainerManager
+ .allocateContainer(RATIS, THREE, "Owner1").getPipeline();
Assert.assertEquals(newRatisPipeline.getId(), ratisPipeline1.getId());
}
}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index 7787b53..16e66ba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -44,13 +44,13 @@
StorageContainerManager scm) throws IOException {
return performOperationOnKeyContainers((blockID) -> {
try {
- scm.getScmContainerManager()
+ scm.getContainerManager()
.updateContainerState(blockID.getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
- scm.getScmContainerManager()
+ scm.getContainerManager()
.updateContainerState(blockID.getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
- Assert.assertFalse(scm.getScmContainerManager()
+ Assert.assertFalse(scm.getContainerManager()
.getContainerWithPipeline(blockID.getContainerID())
.getContainerInfo().isContainerOpen());
} catch (IOException e) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index ddff0c5..2813f79 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -445,7 +445,7 @@
// Sum the data size from chunks in Container via containerID
// and localID, make sure the size equals to the actually value size.
Pipeline pipeline = cluster.getStorageContainerManager()
- .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 83421b2..f606263 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -301,7 +301,7 @@
Assert.assertTrue(!containerIdList.isEmpty());
for (long containerID : containerIdList) {
Pipeline pipeline =
- cluster.getStorageContainerManager().getScmContainerManager()
+ cluster.getStorageContainerManager().getContainerManager()
.getContainerWithPipeline(containerID).getPipeline();
pipelineList.add(pipeline);
List<DatanodeDetails> datanodes = pipeline.getMachines();
@@ -349,7 +349,7 @@
new ArrayList<>(groupOutputStream.getLocationInfoList());
long containerID = locationInfos.get(0).getContainerID();
List<DatanodeDetails> datanodes =
- cluster.getStorageContainerManager().getScmContainerManager()
+ cluster.getStorageContainerManager().getContainerManager()
.getContainerWithPipeline(containerID).getPipeline().getMachines();
Assert.assertEquals(1, datanodes.size());
waitForContainerClose(keyName, key, HddsProtos.ReplicationType.STAND_ALONE);
@@ -451,7 +451,7 @@
groupOutputStream.getLocationInfoList();
long containerID = locationInfos.get(0).getContainerID();
List<DatanodeDetails> datanodes =
- cluster.getStorageContainerManager().getScmContainerManager()
+ cluster.getStorageContainerManager().getContainerManager()
.getContainerWithPipeline(containerID).getPipeline().getMachines();
Assert.assertEquals(1, datanodes.size());
// move the container on the datanode to Closing state, this will ensure
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index bf1eba6..3c6bd4a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -580,7 +580,7 @@
// Second, sum the data size from chunks in Container via containerID
// and localID, make sure the size equals to the size from keyDetails.
Pipeline pipeline = cluster.getStorageContainerManager()
- .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 1e0317f..5b49ac6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -72,7 +72,6 @@
.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone
.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@@ -239,7 +238,7 @@
ContainerReportsProto dummyReport = dummyReportsBuilder.build();
logCapturer.clearOutput();
- scm.getScmContainerManager().processContainerReports(
+ scm.getContainerManager().processContainerReports(
cluster.getHddsDatanodes().get(0).getDatanodeDetails(), dummyReport,
false);
// wait for event to be handled by event handler
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 8c52847..9602207 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -102,7 +102,7 @@
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
- .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
@@ -157,7 +157,7 @@
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
- .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(datanodes.size(), 1);
@@ -214,7 +214,7 @@
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
- .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
List<DatanodeDetails> datanodes = pipeline.getMachines();
Assert.assertEquals(3, datanodes.size());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 84b7b76..0137a40 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -81,7 +81,7 @@
long containerID = omKeyLocationInfo.getContainerID();
Pipeline pipeline = cluster.getStorageContainerManager()
- .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getContainerManager().getContainerWithPipeline(containerID)
.getPipeline();
Assert.assertFalse(isContainerClosed(cluster, containerID));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
index 2a6921a..ef84b0e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
@@ -29,7 +29,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -128,7 +128,7 @@
Map<String, OmKeyInfo> keyLocations = helper.createKeys(100, 4096);
final List<ContainerInfo> containers = cluster
.getStorageContainerManager()
- .getScmContainerManager().getStateManager().getAllContainers();
+ .getContainerManager().getStateManager().getAllContainers();
GenericTestUtils.waitFor(() -> {
return containers.size() > 10;
}, 100, 1000);
@@ -251,7 +251,7 @@
new TestStorageContainerManagerHelper(miniCluster, conf);
Map<String, OmKeyInfo> keyLocations = helper.createKeys(100 * 2, 4096);
final List<ContainerInfo> containers = miniCluster
- .getStorageContainerManager().getScmContainerManager()
+ .getStorageContainerManager().getContainerManager()
.getStateManager().getAllContainers();
GenericTestUtils.waitFor(() -> {
return containers.size() > 10;
@@ -264,8 +264,8 @@
containers.remove(3);
// Close remaining containers
- ContainerMapping mapping = (ContainerMapping) miniCluster
- .getStorageContainerManager().getScmContainerManager();
+ SCMContainerManager mapping = (SCMContainerManager) miniCluster
+ .getStorageContainerManager().getContainerManager();
containers.forEach(c -> {
try {
mapping.updateContainerState(c.getContainerID(),
@@ -347,7 +347,7 @@
SCMClientProtocolServer clientProtocolServer = cluster
.getStorageContainerManager().getClientProtocolServer();
assertFalse((scm.getClientProtocolServer()).getChillModeStatus());
- final List<ContainerInfo> containers = scm.getScmContainerManager()
+ final List<ContainerInfo> containers = scm.getContainerManager()
.getStateManager().getAllContainers();
scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
GenericTestUtils.waitFor(() -> {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 4026348..a476583 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.ozone.scm;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -25,7 +27,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -83,7 +84,7 @@
private OzoneConfiguration conf;
private String datanodeIpAddress;
- private ContainerMapping mapping;
+ private ContainerManager containerManager;
private NodeManager nodeManager;
private BlockManagerImpl blockManager;
@@ -119,9 +120,10 @@
cluster.getStorageContainerManager().stop();
eventQueue = new EventQueue();
nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
- mapping = new ContainerMapping(conf, nodeManager, 128,
+ containerManager = new SCMContainerManager(conf, nodeManager, 128,
eventQueue);
- blockManager = new BlockManagerImpl(conf, nodeManager, mapping, eventQueue);
+ blockManager = new BlockManagerImpl(
+ conf, nodeManager, containerManager, eventQueue);
eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
GenericTestUtils.waitFor(() -> {
@@ -165,7 +167,7 @@
}
blockManager.close();
- mapping.close();
+ containerManager.close();
nodeManager.close();
conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);