HDDS-10940. Remove useless TestSCMContainerPlacementPolicyMetrics (#6749)
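
TestSCMContainerPlacementPolicyMetrics only ever observed zero counter values (its own comment notes that no under-replicated container actually got re-replicated), so it verified nothing. This change deletes it and instead stores and increments the SCMContainerPlacementMetrics that were already passed to SCMContainerPlacementCapacity and SCMContainerPlacementRandom but previously ignored, exposes the metrics from StorageContainerManager via getPlacementMetrics(), and asserts the counter deltas in the existing TestContainerReplication integration test. Unit tests now pass a mocked SCMContainerPlacementMetrics instead of null.
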
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
index 3ac5bcb..d1a2d47 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
@@ -72,6 +72,8 @@ public final class SCMContainerPlacementCapacity
public static final Logger LOG =
LoggerFactory.getLogger(SCMContainerPlacementCapacity.class);
+ private final SCMContainerPlacementMetrics metrics;
+
/**
* Constructs a Container Placement policy that considers only capacity.
* That is, this policy tries to place containers based on node weight.
@@ -83,6 +85,7 @@ public SCMContainerPlacementCapacity(final NodeManager nodeManager,
final ConfigurationSource conf, final NetworkTopology networkTopology,
final boolean fallback, final SCMContainerPlacementMetrics metrics) {
super(nodeManager, conf);
+ this.metrics = metrics;
}
/**
@@ -104,6 +107,7 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
List<DatanodeDetails> favoredNodes,
final int nodesRequired, long metadataSizeRequired,
long dataSizeRequired) throws SCMException {
+ metrics.incrDatanodeRequestCount(nodesRequired);
List<DatanodeDetails> healthyNodes = super.chooseDatanodesInternal(
usedNodes, excludedNodes, favoredNodes, nodesRequired,
metadataSizeRequired, dataSizeRequired);
@@ -123,6 +127,7 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
*/
@Override
public DatanodeDetails chooseNode(List<DatanodeDetails> healthyNodes) {
+ metrics.incrDatanodeChooseAttemptCount();
int firstNodeNdx = getRand().nextInt(healthyNodes.size());
int secondNodeNdx = getRand().nextInt(healthyNodes.size());
@@ -142,6 +147,7 @@ public DatanodeDetails chooseNode(List<DatanodeDetails> healthyNodes) {
? firstNodeDetails : secondNodeDetails;
}
healthyNodes.remove(datanodeDetails);
+ metrics.incrDatanodeChooseSuccessCount();
return datanodeDetails;
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
index cdfd57d..c4f2239 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
@@ -45,6 +45,8 @@ public final class SCMContainerPlacementRandom extends SCMCommonPlacementPolicy
public static final Logger LOG =
LoggerFactory.getLogger(SCMContainerPlacementRandom.class);
+ private final SCMContainerPlacementMetrics metrics;
+
/**
* Construct a random Block Placement policy.
*
@@ -55,6 +57,7 @@ public SCMContainerPlacementRandom(final NodeManager nodeManager,
final ConfigurationSource conf, final NetworkTopology networkTopology,
final boolean fallback, final SCMContainerPlacementMetrics metrics) {
super(nodeManager, conf);
+ this.metrics = metrics;
}
/**
@@ -77,6 +80,7 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
List<DatanodeDetails> favoredNodes, final int nodesRequired,
long metadataSizeRequired, long dataSizeRequired)
throws SCMException {
+ metrics.incrDatanodeRequestCount(nodesRequired);
List<DatanodeDetails> healthyNodes =
super.chooseDatanodesInternal(usedNodes, excludedNodes, favoredNodes,
nodesRequired, metadataSizeRequired, dataSizeRequired);
@@ -96,9 +100,11 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
*/
@Override
public DatanodeDetails chooseNode(final List<DatanodeDetails> healthyNodes) {
+ metrics.incrDatanodeChooseAttemptCount();
DatanodeDetails selectedNode =
healthyNodes.get(getRand().nextInt(healthyNodes.size()));
healthyNodes.remove(selectedNode);
+ metrics.incrDatanodeChooseSuccessCount();
return selectedNode;
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 444b341..513361b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -2096,6 +2096,10 @@ public SCMHAMetrics getScmHAMetrics() {
return scmHAMetrics;
}
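+ /** Returns the container placement metrics; exposed so tests such as TestContainerReplication can assert placement behaviour. */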
+ public SCMContainerPlacementMetrics getPlacementMetrics() {
+ return placementMetrics;
+ }
+
public ContainerTokenGenerator getContainerTokenGenerator() {
return containerTokenMgr != null
? containerTokenMgr
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 34678a3..e597c42 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -51,6 +51,7 @@
* Test for the scm container placement.
*/
public class TestSCMContainerPlacementCapacity {
+
@Test
public void chooseDatanodes() throws SCMException {
//given
@@ -120,7 +121,7 @@ public void chooseDatanodes() throws SCMException {
SCMContainerPlacementCapacity scmContainerPlacementRandom =
new SCMContainerPlacementCapacity(mockNodeManager, conf, null, true,
- null);
+ mock(SCMContainerPlacementMetrics.class));
List<DatanodeDetails> existingNodes = new ArrayList<>();
existingNodes.add(datanodes.get(0));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 7b8a7c4..c17e6a5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -91,7 +91,7 @@ public void chooseDatanodes() throws SCMException {
SCMContainerPlacementRandom scmContainerPlacementRandom =
new SCMContainerPlacementRandom(mockNodeManager, conf, null, true,
- null);
+ mock(SCMContainerPlacementMetrics.class));
List<DatanodeDetails> existingNodes = new ArrayList<>();
existingNodes.add(datanodes.get(0));
@@ -131,7 +131,7 @@ public void testPlacementPolicySatisified() {
NodeManager mockNodeManager = mock(NodeManager.class);
SCMContainerPlacementRandom scmContainerPlacementRandom =
new SCMContainerPlacementRandom(mockNodeManager, conf, null, true,
- null);
+ mock(SCMContainerPlacementMetrics.class));
ContainerPlacementStatus status =
scmContainerPlacementRandom.validateContainerPlacement(datanodes, 3);
assertTrue(status.isPolicySatisfied());
@@ -210,7 +210,7 @@ public void testIsValidNode() throws SCMException {
SCMContainerPlacementRandom scmContainerPlacementRandom =
new SCMContainerPlacementRandom(mockNodeManager, conf, null, true,
- null);
+ mock(SCMContainerPlacementMetrics.class));
assertTrue(
scmContainerPlacementRandom.isValidNode(datanodes.get(0), 15L, 15L));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
index 0aa3ace..f38a5b0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -34,6 +35,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.mock;
/**
* Asserts that allocation strategy works as expected.
@@ -79,10 +81,10 @@ public void testCapacityPlacementYieldsBetterDataDistribution() throws
SCMContainerPlacementCapacity capacityPlacer = new
SCMContainerPlacementCapacity(nodeManagerCapacity,
new OzoneConfiguration(),
- null, true, null);
+ null, true, mock(SCMContainerPlacementMetrics.class));
SCMContainerPlacementRandom randomPlacer = new
SCMContainerPlacementRandom(nodeManagerRandom, new OzoneConfiguration(),
- null, true, null);
+ null, true, mock(SCMContainerPlacementMetrics.class));
for (int x = 0; x < opsCount; x++) {
long containerSize = random.nextInt(10) * OzoneConsts.GB;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java
deleted file mode 100644
index 4ac4431..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.client.RatisReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.net.DNSToSwitchMapping;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
- .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.apache.ozone.test.MetricsAsserts.getLongCounter;
-import static org.apache.ozone.test.MetricsAsserts.getMetrics;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-/**
- * Test cases to verify the metrics exposed by SCMPipelineManager.
- */
-public class TestSCMContainerPlacementPolicyMetrics {
-
- private MiniOzoneCluster cluster;
- private MetricsRecordBuilder metrics;
- private OzoneClient ozClient = null;
- private ObjectStore store = null;
-
- @BeforeEach
- public void setup() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
- "org.apache.hadoop.hdds.scm.container.placement.algorithms." +
- "SCMContainerPlacementRackAware");
- // TODO enable when RATIS-788 is fixed
- conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false);
- conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
- StaticMapping.class, DNSToSwitchMapping.class);
- StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
- Collections.singleton(HddsUtils.getHostName(conf))).get(0),
- "/rack1");
- conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(4)
- .build();
- cluster.waitForClusterToBeReady();
- metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName());
- ozClient = OzoneClientFactory.getRpcClient(conf);
- store = ozClient.getObjectStore();
- }
-
- /**
- * Verifies container placement metric.
- */
- @Test @Timeout(unit = TimeUnit.MILLISECONDS, value = 60000)
- public void test() throws IOException, TimeoutException {
- String volumeName = UUID.randomUUID().toString();
- String bucketName = UUID.randomUUID().toString();
-
- String value = "sample value";
- store.createVolume(volumeName);
- OzoneVolume volume = store.getVolume(volumeName);
- volume.createBucket(bucketName);
- OzoneBucket bucket = volume.getBucket(bucketName);
- String keyName = UUID.randomUUID().toString();
-
- // Write data into a key
- try (OzoneOutputStream out = bucket.createKey(keyName,
- value.getBytes(UTF_8).length, ReplicationType.RATIS,
- THREE, new HashMap<>())) {
- out.write(value.getBytes(UTF_8));
- }
-
- // close container
- PipelineManager manager =
- cluster.getStorageContainerManager().getPipelineManager();
- List<Pipeline> pipelines = manager.getPipelines().stream().filter(p ->
- RatisReplicationConfig
- .hasFactor(p.getReplicationConfig(), ReplicationFactor.THREE))
- .collect(Collectors.toList());
- Pipeline targetPipeline = pipelines.get(0);
- List<DatanodeDetails> nodes = targetPipeline.getNodes();
- manager.closePipeline(pipelines.get(0), true);
-
- // kill datanode to trigger under-replicated container replication
- cluster.shutdownHddsDatanode(nodes.get(0));
- try {
- Thread.sleep(5 * 1000);
- } catch (InterruptedException e) {
- }
- cluster.getStorageContainerManager().getReplicationManager()
- .processAll();
- try {
- Thread.sleep(30 * 1000);
- } catch (InterruptedException e) {
- }
-
- long totalRequest = getLongCounter("DatanodeRequestCount", metrics);
- long tryCount = getLongCounter("DatanodeChooseAttemptCount", metrics);
- long sucessCount =
- getLongCounter("DatanodeChooseSuccessCount", metrics);
- long compromiseCount =
- getLongCounter("DatanodeChooseFallbackCount", metrics);
-
- // Seems no under-replicated closed containers get replicated
- assertEquals(0, totalRequest);
- assertEquals(0, tryCount);
- assertEquals(0, sucessCount);
- assertEquals(0, compromiseCount);
- }
-
- @AfterEach
- public void teardown() {
- IOUtils.closeQuietly(ozClient);
- cluster.shutdown();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 810a572..7a1fdf2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -29,6 +29,7 @@
import static org.apache.hadoop.ozone.container.TestHelper.waitForReplicaCount;
import static org.apache.ozone.test.GenericTestUtils.setLogLevel;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.io.IOException;
@@ -37,10 +38,12 @@
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware;
@@ -104,11 +107,16 @@ void testContainerReplication(
conf.set(OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, placementPolicyClass);
try (MiniOzoneCluster cluster = newCluster(conf)) {
cluster.waitForClusterToBeReady();
+ SCMContainerPlacementMetrics metrics = cluster.getStorageContainerManager().getPlacementMetrics();
try (OzoneClient client = cluster.newClient()) {
createTestData(client);
List<OmKeyLocationInfo> keyLocations = lookupKey(cluster);
assertThat(keyLocations).isNotEmpty();
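+ // snapshot the placement metric counters before a replica is lost, so the asserts below check only the re-replication work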
+ long datanodeChooseAttemptCount = metrics.getDatanodeChooseAttemptCount();
+ long datanodeChooseSuccessCount = metrics.getDatanodeChooseSuccessCount();
+ long datanodeChooseFallbackCount = metrics.getDatanodeChooseFallbackCount();
+ long datanodeRequestCount = metrics.getDatanodeRequestCount();
OmKeyLocationInfo keyLocation = keyLocations.get(0);
long containerID = keyLocation.getContainerID();
@@ -118,6 +126,12 @@ void testContainerReplication(
waitForReplicaCount(containerID, 2, cluster);
waitForReplicaCount(containerID, 3, cluster);
+
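+ // re-replicating the under-replicated container should issue exactly one more placement request and one more successful choice; attempts grow and fallback may or may not be used depending on the policy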
+ Supplier<String> messageSupplier = () -> "policy=" + placementPolicyClass + " legacy=" + legacyEnabled;
+ assertEquals(datanodeRequestCount + 1, metrics.getDatanodeRequestCount(), messageSupplier);
+ assertThat(metrics.getDatanodeChooseAttemptCount()).isGreaterThan(datanodeChooseAttemptCount);
+ assertEquals(datanodeChooseSuccessCount + 1, metrics.getDatanodeChooseSuccessCount(), messageSupplier);
+ assertThat(metrics.getDatanodeChooseFallbackCount()).isGreaterThanOrEqualTo(datanodeChooseFallbackCount);
}
}
}