HDDS-7174. Migrate SCM integration tests to JUnit 5 (#3722)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
index 4434b40..8a91388 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
@@ -40,13 +40,11 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Matchers;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.doNothing;
@@ -56,6 +54,7 @@
 /**
  * Class used for testing the SCM DB Checkpoint provider servlet.
  */
+@Timeout(240)
 public class TestSCMDbCheckpointServlet {
   private MiniOzoneCluster cluster = null;
   private StorageContainerManager scm;
@@ -65,11 +64,6 @@
   private String scmId;
   private String omId;
 
-  @Rule
-  public Timeout timeout = Timeout.seconds(240);
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
   /**
    * Create a MiniDFSCluster for testing.
    * <p>
@@ -77,7 +71,7 @@
    *
    * @throws Exception
    */
-  @Before
+  @BeforeEach
   public void init() throws Exception {
     conf = new OzoneConfiguration();
     clusterId = UUID.randomUUID().toString();
@@ -97,7 +91,7 @@
   /**
    * Shutdown MiniDFSCluster.
    */
-  @After
+  @AfterEach
   public void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -166,14 +160,14 @@
 
       scmDbCheckpointServletMock.doGet(requestMock, responseMock);
 
-      Assert.assertTrue(tempFile.length() > 0);
-      Assert.assertTrue(
+      Assertions.assertTrue(tempFile.length() > 0);
+      Assertions.assertTrue(
           scmMetrics.getDBCheckpointMetrics().
               getLastCheckpointCreationTimeTaken() > 0);
-      Assert.assertTrue(
+      Assertions.assertTrue(
           scmMetrics.getDBCheckpointMetrics().
               getLastCheckpointStreamingTimeTaken() > 0);
-      Assert.assertTrue(scmMetrics.getDBCheckpointMetrics().
+      Assertions.assertTrue(scmMetrics.getDBCheckpointMetrics().
           getNumCheckpoints() > initialCheckpointCount);
     } finally {
       FileUtils.deleteQuietly(tempFile);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
index 204ac92..afed4e2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
@@ -23,13 +23,12 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.util.UUID;
 
@@ -43,7 +42,7 @@
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws Exception {
     conf = new OzoneConfiguration();
     conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
@@ -76,25 +75,22 @@
     long snapshotInfo2 = scm.getScmHAManager().asSCMHADBTransactionBuffer()
         .getLatestTrxInfo().getTransactionIndex();
 
-    Assert.assertTrue(
-        String.format("Snapshot index 2 {} should greater than Snapshot " +
-            "index 1 {}", snapshotInfo2, snapshotInfo1),
-        snapshotInfo2 > snapshotInfo1);
+    Assertions.assertTrue(snapshotInfo2 > snapshotInfo1,
+        String.format("Snapshot index 2 %d should be greater than Snapshot " +
+            "index 1 %d", snapshotInfo2, snapshotInfo1));
 
     cluster.restartStorageContainerManager(false);
     TransactionInfo trxInfoAfterRestart =
         scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo();
-    Assert.assertTrue(
+    Assertions.assertTrue(
         trxInfoAfterRestart.getTransactionIndex() >= snapshotInfo2);
-    try {
-      pipelineManager.getPipeline(ratisPipeline1.getId());
-      pipelineManager.getPipeline(ratisPipeline2.getId());
-    } catch (PipelineNotFoundException e) {
-      Assert.fail("Should not see a PipelineNotFoundException");
-    }
+    Assertions.assertDoesNotThrow(() ->
+        pipelineManager.getPipeline(ratisPipeline1.getId()));
+    Assertions.assertDoesNotThrow(() ->
+        pipelineManager.getPipeline(ratisPipeline2.getId()));
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index 2ac274d..9aedad9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -33,39 +33,31 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.IOException;
 import java.util.HashMap;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
-import org.junit.Rule;
-import org.junit.rules.Timeout;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.fail;
 
 /**
  * Class used to test {@link SCMContainerManagerMetrics}.
  */
+@Timeout(300)
 public class TestSCMContainerManagerMetrics {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private MiniOzoneCluster cluster;
   private StorageContainerManager scm;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HDDS_CONTAINER_REPORT_INTERVAL, "3000s");
@@ -76,7 +68,7 @@
   }
 
 
-  @After
+  @AfterEach
   public void teardown() {
     cluster.shutdown();
   }
@@ -94,22 +86,19 @@
             HddsProtos.ReplicationFactor.ONE), OzoneConsts.OZONE);
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
+    Assertions.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
         metrics), ++numSuccessfulCreateContainers);
 
-    try {
-      containerManager.allocateContainer(
-          RatisReplicationConfig.getInstance(
-              HddsProtos.ReplicationFactor.THREE), OzoneConsts.OZONE);
-      fail("testContainerOpsMetrics failed");
-    } catch (IOException ex) {
-      // Here it should fail, so it should have the old metric value.
-      metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-      Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
-          metrics), numSuccessfulCreateContainers);
-      Assert.assertEquals(getLongCounter("NumFailureCreateContainers",
-          metrics), 1);
-    }
+    Assertions.assertThrows(IOException.class, () ->
+        containerManager.allocateContainer(
+            RatisReplicationConfig.getInstance(
+                HddsProtos.ReplicationFactor.THREE), OzoneConsts.OZONE));
+    // allocateContainer should fail, so it should have the old metric value.
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    Assertions.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
+        metrics), numSuccessfulCreateContainers);
+    Assertions.assertEquals(getLongCounter("NumFailureCreateContainers",
+        metrics), 1);
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     long numSuccessfulDeleteContainers = getLongCounter(
@@ -119,28 +108,24 @@
         ContainerID.valueOf(containerInfo.getContainerID()));
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
+    Assertions.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
         metrics), numSuccessfulDeleteContainers + 1);
 
-    try {
-      // Give random container to delete.
-      containerManager.deleteContainer(
-          ContainerID.valueOf(RandomUtils.nextLong(10000, 20000)));
-      fail("testContainerOpsMetrics failed");
-    } catch (ContainerNotFoundException ex) {
-      // Here it should fail, so it should have the old metric value.
-      metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-      Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
-          metrics), numSuccessfulCreateContainers);
-      Assert.assertEquals(getLongCounter("NumFailureDeleteContainers",
-          metrics), 1);
-    }
+    Assertions.assertThrows(ContainerNotFoundException.class, () ->
+        containerManager.deleteContainer(
+            ContainerID.valueOf(RandomUtils.nextLong(10000, 20000))));
+    // deleteContainer should fail, so it should have the old metric value.
+    metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
+    Assertions.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
+        metrics), numSuccessfulCreateContainers);
+    Assertions.assertEquals(getLongCounter("NumFailureDeleteContainers",
+        metrics), 1);
 
     long currentValue = getLongCounter("NumListContainerOps", metrics);
     containerManager.getContainers(
         ContainerID.valueOf(containerInfo.getContainerID()), 1);
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(currentValue + 1,
+    Assertions.assertEquals(currentValue + 1,
         getLongCounter("NumListContainerOps", metrics));
 
   }
@@ -153,8 +138,8 @@
 
     MetricsRecordBuilder metrics =
         getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
-    Assert.assertEquals(getLongCounter("NumContainerReportsProcessedSuccessful",
-        metrics), 1);
+    Assertions.assertEquals(1L,
+        getLongCounter("NumContainerReportsProcessedSuccessful", metrics));
 
     // Create key should create container on DN.
     cluster.getRpcClient().getObjectStore().getClientProxy()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
index b631baa..03cd0d6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
@@ -30,8 +30,8 @@
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 
 import org.apache.ozone.test.LambdaTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.List;
@@ -86,7 +86,7 @@
         false);
     init(3, conf);
     waitForPipelineCreated(2);
-    Assert.assertEquals(2, pipelineManager.getPipelines(ReplicationConfig
+    Assertions.assertEquals(2, pipelineManager.getPipelines(ReplicationConfig
         .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
             ReplicationFactor.THREE)).size());
     assertNotSamePeers();
@@ -102,12 +102,10 @@
     waitForPipelineCreated(1);
     // datanode pipeline limit is set to 2, but only one set of 3 pipelines
     // will be created. Further pipeline creation should fail
-    Assert.assertEquals(1, pipelineManager.getPipelines(RATIS_THREE).size());
-    try {
-      pipelineManager.createPipeline(RATIS_THREE);
-      Assert.fail();
-    } catch (IOException ex) {
-    }
+    Assertions.assertEquals(1,
+        pipelineManager.getPipelines(RATIS_THREE).size());
+    Assertions.assertThrows(IOException.class, () ->
+        pipelineManager.createPipeline(RATIS_THREE));
     shutdown();
   }
 
@@ -123,20 +121,18 @@
     // For example, with d1,d2, d3, d4, d5, only d1 d2 d3 and d1 d4 d5 can form
     // pipeline as the none of peers from any of existing pipelines will be
     // repeated
-    Assert.assertEquals(2, pipelineManager.getPipelines(RATIS_THREE).size());
+    Assertions.assertEquals(2,
+        pipelineManager.getPipelines(RATIS_THREE).size());
     List<DatanodeDetails> dns = nodeManager.getAllNodes().stream()
         .filter((dn) -> nodeManager.getPipelinesCount(dn) > 2).collect(
             Collectors.toList());
-    Assert.assertEquals(1, dns.size());
-    try {
-      pipelineManager.createPipeline(RATIS_THREE);
-      Assert.fail();
-    } catch (IOException ex) {
-    }
+    Assertions.assertEquals(1, dns.size());
+    Assertions.assertThrows(IOException.class, () ->
+        pipelineManager.createPipeline(RATIS_THREE));
     Collection<PipelineID> pipelineIds = nodeManager.getPipelines(dns.get(0));
     // Only one dataode should have 3 pipelines in total, 1 RATIS ONE pipeline
     // and 2 RATIS 3 pipeline
-    Assert.assertEquals(3, pipelineIds.size());
+    Assertions.assertEquals(3, pipelineIds.size());
     List<Pipeline> pipelines = new ArrayList<>();
     pipelineIds.forEach((id) -> {
       try {
@@ -144,21 +140,21 @@
       } catch (PipelineNotFoundException pnfe) {
       }
     });
-    Assert.assertEquals(1, pipelines.stream()
+    Assertions.assertEquals(1, pipelines.stream()
         .filter((p) -> (p.getReplicationConfig().getRequiredNodes() == 1))
-        .collect(Collectors.toList()).size());
-    Assert.assertEquals(2, pipelines.stream()
+        .count());
+    Assertions.assertEquals(2, pipelines.stream()
         .filter((p) -> (p.getReplicationConfig().getRequiredNodes() == 3))
-        .collect(Collectors.toList()).size());
+        .count());
     shutdown();
   }
   private void assertNotSamePeers() {
     nodeManager.getAllNodes().forEach((dn) -> {
       Collection<DatanodeDetails> peers = nodeManager.getPeerList(dn);
-      Assert.assertFalse(peers.contains(dn));
+      Assertions.assertFalse(peers.contains(dn));
       List<DatanodeDetails> trimList = nodeManager.getAllNodes();
       trimList.remove(dn);
-      Assert.assertTrue(peers.containsAll(trimList));
+      Assertions.assertTrue(peers.containsAll(trimList));
     });
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index d891caa..6604cd6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -25,35 +25,28 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 
 /**
  * Test for the Node2Pipeline map.
  */
+@Timeout(300)
 public class TestNode2PipelineMap {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private MiniOzoneCluster cluster;
   private OzoneConfiguration conf;
   private StorageContainerManager scm;
@@ -66,7 +59,7 @@
    *
    * @throws IOException
    */
-  @Before
+  @BeforeEach
   public void init() throws Exception {
     conf = new OzoneConfiguration();
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
@@ -85,7 +78,7 @@
   /**
    * Shutdown MiniDFSCluster.
    */
-  @After
+  @AfterEach
   public void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -100,17 +93,18 @@
         .getContainersInPipeline(ratisContainer.getPipeline().getId());
 
     ContainerID cId = ratisContainer.getContainerInfo().containerID();
-    Assert.assertEquals(1, set.size());
+    Assertions.assertEquals(1, set.size());
     set.forEach(containerID ->
-        Assert.assertEquals(containerID, cId));
+        Assertions.assertEquals(containerID, cId));
 
     List<DatanodeDetails> dns = ratisContainer.getPipeline().getNodes();
-    Assert.assertEquals(3, dns.size());
+    Assertions.assertEquals(3, dns.size());
 
     // get pipeline details by dnid
     Set<PipelineID> pipelines = scm.getScmNodeManager()
         .getPipelines(dns.get(0));
-    Assert.assertTrue(pipelines.contains(ratisContainer.getPipeline().getId()));
+    Assertions.assertTrue(
+        pipelines.contains(ratisContainer.getPipeline().getId()));
 
     // Now close the container and it should not show up while fetching
     // containers by pipeline
@@ -120,13 +114,13 @@
         .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
     Set<ContainerID> set2 = pipelineManager.getContainersInPipeline(
         ratisContainer.getPipeline().getId());
-    Assert.assertEquals(0, set2.size());
+    Assertions.assertEquals(0, set2.size());
 
     pipelineManager
         .closePipeline(ratisContainer.getPipeline(), false);
     pipelines = scm.getScmNodeManager()
         .getPipelines(dns.get(0));
-    Assert
-        .assertFalse(pipelines.contains(ratisContainer.getPipeline().getId()));
+    Assertions.assertFalse(
+        pipelines.contains(ratisContainer.getPipeline().getId()));
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
index 89a44c8..0a64e13 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
@@ -27,28 +27,20 @@
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.time.Duration;
 import java.util.List;
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 
 /**
  * Test Node failure detection and handling in Ratis.
  */
 public class TestNodeFailure {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private static MiniOzoneCluster cluster;
   private static List<Pipeline> ratisPipelines;
   private static PipelineManager pipelineManager;
@@ -59,7 +51,7 @@
    *
    * @throws IOException
    */
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     final OzoneConfiguration conf = new OzoneConfiguration();
     DatanodeRatisServerConfig ratisServerConfig =
@@ -90,7 +82,7 @@
   /**
    * Shutdown MiniDFSCluster.
    */
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -112,7 +104,7 @@
           }
         }, timeForFailure / 2, timeForFailure * 3);
       } catch (Exception e) {
-        Assert.fail("Test Failed: " + e.getMessage());
+        Assertions.fail("Test Failed: " + e.getMessage());
       }
     });
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index 0d46111..67cccf5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -26,30 +26,24 @@
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 
 /**
  * Test SCM restart and recovery wrt pipelines.
  */
+@Timeout(300)
 public class TestSCMRestart {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
   private static Pipeline ratisPipeline1;
@@ -63,7 +57,7 @@
    *
    * @throws IOException
    */
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
     conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000,
@@ -103,7 +97,7 @@
   /**
    * Shutdown MiniDFSCluster.
    */
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -118,16 +112,17 @@
         pipelineManager.getPipeline(ratisPipeline1.getId());
     Pipeline ratisPipeline2AfterRestart =
         pipelineManager.getPipeline(ratisPipeline2.getId());
-    Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
-    Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
-    Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
-    Assert.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2);
+    Assertions.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
+    Assertions.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
+    Assertions.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
+    Assertions.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2);
 
     // Try creating a new container, it should be from the same pipeline
     // as was before restart
     ContainerInfo containerInfo = newContainerManager
         .allocateContainer(RatisReplicationConfig.getInstance(
             ReplicationFactor.THREE), "Owner1");
-    Assert.assertEquals(containerInfo.getPipelineID(), ratisPipeline1.getId());
+    Assertions.assertEquals(ratisPipeline1.getId(),
+        containerInfo.getPipelineID());
   }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index 09f7b19..17696fb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -30,24 +30,24 @@
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
+import java.nio.file.Path;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This class tests SCM Safe mode with pipeline rules.
  */
 
-@Ignore
+@Disabled
 public class TestSCMSafeModeWithPipelineRules {
 
   private MiniOzoneCluster cluster;
@@ -55,12 +55,9 @@
   private PipelineManager pipelineManager;
   private MiniOzoneCluster.Builder clusterBuilder;
 
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  public void setup(int numDatanodes) throws Exception {
+  public void setup(int numDatanodes, Path metadataDir) throws Exception {
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        temporaryFolder.newFolder().toString());
+        metadataDir.toAbsolutePath().toString());
     conf.setBoolean(
         HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
         true);
@@ -81,10 +78,10 @@
 
 
   @Test
-  public void testScmSafeMode() throws Exception {
+  public void testScmSafeMode(@TempDir Path tempDir) throws Exception {
 
     int datanodeCount = 6;
-    setup(datanodeCount);
+    setup(datanodeCount, tempDir);
 
     waitForRatis3NodePipelines(datanodeCount / 3);
     waitForRatis1NodePipelines(datanodeCount);
@@ -116,7 +113,7 @@
         cluster.getStorageContainerManager().getScmSafeModeManager();
 
 
-    // Ceil(0.1 * 2) is 1, as one pipeline is healthy healthy pipeline rule is
+    // Ceil(0.1 * 2) is 1, so as one pipeline is healthy, the pipeline rule is
     // satisfied
 
     GenericTestUtils.waitFor(() ->
@@ -129,7 +126,7 @@
         !scmSafeModeManager.getOneReplicaPipelineSafeModeRule()
             .validate(), 1000, 60000);
 
-    Assert.assertTrue(cluster.getStorageContainerManager().isInSafeMode());
+    Assertions.assertTrue(cluster.getStorageContainerManager().isInSafeMode());
 
     DatanodeDetails restartedDatanode = pipelineList.get(1).getFirstNode();
     // Now restart one datanode from the 2nd pipeline
@@ -149,7 +146,7 @@
 
     // As after safemode wait time is not completed, we should have total
     // pipeline's as original count 6(1 node pipelines) + 2 (3 node pipeline)
-    Assert.assertEquals(totalPipelineCount,
+    Assertions.assertEquals(totalPipelineCount,
         pipelineManager.getPipelines().size());
 
     ReplicationManager replicationManager =
@@ -182,7 +179,7 @@
 
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index 6586055..fef4ef1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -25,34 +25,25 @@
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * Test allocate container calls.
  */
+@Timeout(300)
 public class TestAllocateContainer {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
@@ -62,7 +53,7 @@
     xceiverClientManager = new XceiverClientManager(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws InterruptedException {
     if (cluster != null) {
       cluster.shutdown();
@@ -77,16 +68,16 @@
             SCMTestUtils.getReplicationType(conf),
             SCMTestUtils.getReplicationFactor(conf),
             OzoneConsts.OZONE);
-    Assert.assertNotNull(container);
-    Assert.assertNotNull(container.getPipeline().getFirstNode());
+    Assertions.assertNotNull(container);
+    Assertions.assertNotNull(container.getPipeline().getFirstNode());
 
   }
 
   @Test
-  public void testAllocateNull() throws Exception {
-    thrown.expect(NullPointerException.class);
-    storageContainerLocationClient.allocateContainer(
-        SCMTestUtils.getReplicationType(conf),
-        SCMTestUtils.getReplicationFactor(conf), null);
+  public void testAllocateNull() {
+    Assertions.assertThrows(NullPointerException.class, () ->
+        storageContainerLocationClient.allocateContainer(
+            SCMTestUtils.getReplicationType(conf),
+            SCMTestUtils.getReplicationFactor(conf), null));
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
index 3336408..21547b8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -31,16 +30,15 @@
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.time.Duration;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
-import static junit.framework.TestCase.assertEquals;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
@@ -49,7 +47,8 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Integration test to ensure a container can be closed and its replicas
@@ -63,7 +62,7 @@
   private OzoneBucket bucket;
   private MiniOzoneCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     final int interval = 100;
@@ -91,7 +90,7 @@
     bucket = TestDataUtil.createVolumeAndBucket(cluster, volName, bucketName);
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     if (cluster != null) {
       cluster.shutdown();
@@ -143,14 +142,9 @@
    * @return
    */
   private Set<ContainerReplica> getContainerReplicas(ContainerInfo c) {
-    Set<ContainerReplica> replicas = null;
-    try {
-      replicas = cluster.getStorageContainerManager()
-          .getContainerManager().getContainerReplicas(c.containerID());
-    } catch (ContainerNotFoundException e) {
-      fail("Unexpected ContainerNotFoundException");
-    }
-    return replicas;
+    return assertDoesNotThrow(() -> cluster.getStorageContainerManager()
+        .getContainerManager().getContainerReplicas(c.containerID()),
+        "Unexpected exception while retrieving container replicas");
   }
 
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index 5c467a0..e957ed4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -36,36 +36,27 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 /**
  * Test Container calls.
  */
+@Timeout(300)
 public class TestContainerSmallFile {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration ozoneConfig;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     ozoneConfig = new OzoneConfiguration();
     ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
@@ -78,7 +69,7 @@
     xceiverClientManager = new XceiverClientManager(ozoneConfig);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws InterruptedException {
     if (cluster != null) {
       cluster.shutdown();
@@ -105,7 +96,7 @@
         ContainerProtocolCalls.readSmallFile(client, blockID, null);
     String readData = response.getData().getDataBuffers().getBuffersList()
         .get(0).toStringUtf8();
-    Assert.assertEquals("data123", readData);
+    Assertions.assertEquals("data123", readData);
     xceiverClientManager.releaseClient(client, false);
   }
 
@@ -120,14 +111,12 @@
     ContainerProtocolCalls.createContainer(client,
         container.getContainerInfo().getContainerID(), null);
 
-    thrown.expect(StorageContainerException.class);
-    thrown.expectMessage("Unable to find the block");
-
     BlockID blockID = ContainerTestHelper.getTestBlockID(
         container.getContainerInfo().getContainerID());
     // Try to read a Key Container Name
-    ContainerProtos.GetSmallFileResponseProto response =
-        ContainerProtocolCalls.readSmallFile(client, blockID, null);
+    Assertions.assertThrowsExactly(StorageContainerException.class,
+        () -> ContainerProtocolCalls.readSmallFile(client, blockID, null),
+        "Unable to find the block");
     xceiverClientManager.releaseClient(client, false);
   }
 
@@ -147,14 +136,11 @@
     ContainerProtocolCalls.writeSmallFile(client, blockID,
         "data123".getBytes(UTF_8), null);
 
-    thrown.expect(StorageContainerException.class);
-    thrown.expectMessage("ContainerID 8888 does not exist");
-
-    // Try to read a invalid key
-    ContainerProtos.GetSmallFileResponseProto response =
-        ContainerProtocolCalls.readSmallFile(client,
-            ContainerTestHelper.getTestBlockID(
-                nonExistContainerID), null);
+    Assertions.assertThrowsExactly(StorageContainerException.class,
+        () -> ContainerProtocolCalls.readSmallFile(client,
+            ContainerTestHelper.getTestBlockID(nonExistContainerID),
+            null),
+        "ContainerID 8888 does not exist");
     xceiverClientManager.releaseClient(client, false);
   }
 
@@ -176,16 +162,14 @@
             .writeSmallFile(client, blockID1, "data123".getBytes(UTF_8), null);
     long bcsId = responseProto.getCommittedBlockLength().getBlockID()
         .getBlockCommitSequenceId();
-    try {
-      blockID1.setBlockCommitSequenceId(bcsId + 1);
-      //read a file with higher bcsId than the container bcsId
-      ContainerProtocolCalls
-          .readSmallFile(client, blockID1, null);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert
-          .assertTrue(sce.getResult() == ContainerProtos.Result.UNKNOWN_BCSID);
-    }
+
+    blockID1.setBlockCommitSequenceId(bcsId + 1);
+    //read a file with higher bcsId than the container bcsId
+    StorageContainerException sce =
+        Assertions.assertThrows(StorageContainerException.class, () ->
+            ContainerProtocolCalls.readSmallFile(client, blockID1, null));
+    Assertions.assertSame(ContainerProtos.Result.UNKNOWN_BCSID,
+        sce.getResult());
 
     // write a new block again to bump up the container bcsId
     BlockID blockID2 = ContainerTestHelper
@@ -193,21 +177,19 @@
     ContainerProtocolCalls
         .writeSmallFile(client, blockID2, "data123".getBytes(UTF_8), null);
 
-    try {
-      blockID1.setBlockCommitSequenceId(bcsId + 1);
-      //read a file with higher bcsId than the committed bcsId for the block
-      ContainerProtocolCalls.readSmallFile(client, blockID1, null);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert
-          .assertTrue(sce.getResult() == ContainerProtos.Result.BCSID_MISMATCH);
-    }
+    blockID1.setBlockCommitSequenceId(bcsId + 1);
+    //read a file with higher bcsId than the committed bcsId for the block
+    sce = Assertions.assertThrows(StorageContainerException.class, () ->
+        ContainerProtocolCalls.readSmallFile(client, blockID1, null));
+    Assertions.assertSame(ContainerProtos.Result.BCSID_MISMATCH,
+        sce.getResult());
+
     blockID1.setBlockCommitSequenceId(bcsId);
     ContainerProtos.GetSmallFileResponseProto response =
         ContainerProtocolCalls.readSmallFile(client, blockID1, null);
     String readData = response.getData().getDataBuffers().getBuffersList()
         .get(0).toStringUtf8();
-    Assert.assertEquals("data123", readData);
+    Assertions.assertEquals("data123", readData);
     xceiverClientManager.releaseClient(client, false);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
index c1db6f7..cbca779 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -42,33 +42,27 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 /**
  * Test Container calls.
  */
+@Timeout(300)
 public class TestGetCommittedBlockLengthAndPutKey {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration ozoneConfig;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private static XceiverClientManager xceiverClientManager;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     ozoneConfig = new OzoneConfiguration();
     ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
@@ -81,7 +75,7 @@
     xceiverClientManager = new XceiverClientManager(ozoneConfig);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws InterruptedException {
     if (cluster != null) {
       cluster.shutdown();
@@ -117,9 +111,9 @@
     response = ContainerProtocolCalls
         .getCommittedBlockLength(client, blockID, null);
     // make sure the block ids in the request and response are same.
-    Assert.assertTrue(
+    Assertions.assertTrue(
         BlockID.getFromProtobuf(response.getBlockID()).equals(blockID));
-    Assert.assertTrue(response.getBlockLength() == data.length);
+    Assertions.assertTrue(response.getBlockLength() == data.length);
     xceiverClientManager.releaseClient(client, false);
   }
 
@@ -136,14 +130,13 @@
     BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
     // move the container to closed state
     ContainerProtocolCalls.closeContainer(client, containerID, null);
-    try {
-      // There is no block written inside the container. The request should
-      // fail.
-      ContainerProtocolCalls.getCommittedBlockLength(client, blockID, null);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getMessage().contains("Unable to find the block"));
-    }
+
+    // There is no block written inside the container. The request should fail.
+    Throwable t = Assertions.assertThrows(StorageContainerException.class,
+        () -> ContainerProtocolCalls.getCommittedBlockLength(client, blockID,
+            null));
+    Assertions.assertTrue(t.getMessage().contains("Unable to find the block"));
+
     xceiverClientManager.releaseClient(client, false);
   }
 
@@ -172,9 +165,9 @@
         ContainerTestHelper
             .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
     response = client.sendCommand(putKeyRequest).getPutBlock();
-    Assert.assertEquals(
+    Assertions.assertEquals(
         response.getCommittedBlockLength().getBlockLength(), data.length);
-    Assert.assertTrue(response.getCommittedBlockLength().getBlockID()
+    Assertions.assertTrue(response.getCommittedBlockLength().getBlockID()
         .getBlockCommitSequenceId() > 0);
     BlockID responseBlockID = BlockID
         .getFromProtobuf(response.getCommittedBlockLength().getBlockID());
@@ -183,7 +176,7 @@
     // make sure the block ids in the request and response are same.
     // This will also ensure that closing the container committed the block
     // on the Datanodes.
-    Assert.assertEquals(responseBlockID, blockID);
+    Assertions.assertEquals(responseBlockID, blockID);
     xceiverClientManager.releaseClient(client, false);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index d27517f..f043098 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -30,9 +30,10 @@
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.IOException;
@@ -48,24 +49,17 @@
 import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.TabularData;
 
-import org.junit.Rule;
-import org.junit.rules.Timeout;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  *
  * This class is to test JMX management interface for scm information.
  */
+@Timeout(300)
 public class TestSCMMXBean {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   public static final Logger LOG = LoggerFactory.getLogger(TestSCMMXBean.class);
   private static int numOfDatanodes = 3;
   private static MiniOzoneCluster cluster;
@@ -73,7 +67,7 @@
   private static StorageContainerManager scm;
   private static MBeanServer mbs;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException, TimeoutException,
       InterruptedException {
     conf = new OzoneConfiguration();
@@ -85,7 +79,7 @@
     mbs = ManagementFactory.getPlatformMBeanServer();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -205,9 +199,8 @@
    */
   private void verifyEquals(TabularData actualData,
       Map<String, Integer> expectedData) {
-    if (actualData == null || expectedData == null) {
-      fail("Data should not be null.");
-    }
+    assertNotNull(actualData);
+    assertNotNull(expectedData);
     for (Object obj : actualData.values()) {
       // Each TabularData is a set of CompositeData
       assertTrue(obj instanceof CompositeData);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
index b72d746..ede9df8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
@@ -23,9 +23,10 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -38,23 +39,17 @@
 import java.util.Map;
 import java.util.concurrent.TimeoutException;
 
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Class which tests the SCMNodeManagerInfo Bean.
  */
+@Timeout(300)
 public class TestSCMNodeManagerMXBean {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
   public static final Logger LOG = LoggerFactory.getLogger(TestSCMMXBean.class);
   private static int numOfDatanodes = 3;
   private static MiniOzoneCluster cluster;
@@ -62,7 +57,7 @@
   private static StorageContainerManager scm;
   private static MBeanServer mbs;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException, TimeoutException,
       InterruptedException {
     conf = new OzoneConfiguration();
@@ -75,7 +70,7 @@
     mbs = ManagementFactory.getPlatformMBeanServer();
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() {
     if (cluster != null) {
       cluster.shutdown();
@@ -130,9 +125,8 @@
 
   private void verifyEquals(TabularData actualData, Map<String, Long>
       expectedData) {
-    if (actualData == null || expectedData == null) {
-      fail("Data should not be null.");
-    }
+    assertNotNull(actualData);
+    assertNotNull(expectedData);
     for (Object obj : actualData.values()) {
       assertTrue(obj instanceof CompositeData);
       CompositeData cds = (CompositeData) obj;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
index 3683a1b..d42f29b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
@@ -31,9 +31,10 @@
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -53,7 +54,7 @@
   private List<DatanodeDetails> dnsInOrder;
   private OzoneConfiguration conf = new OzoneConfiguration();
 
-  @Before
+  @BeforeEach
   public void setup() {
     dns = new ArrayList<>();
     dns.add(MockDatanodeDetails.randomDatanodeDetails());
@@ -77,12 +78,13 @@
 
   @Test
   public void testCorrectDnsReturnedFromPipeline() throws IOException {
-    Assert.assertEquals(dnsInOrder.get(0), pipeline.getClosestNode());
-    Assert.assertEquals(dns.get(0), pipeline.getFirstNode());
-    Assert.assertNotEquals(dns.get(0), dnsInOrder.get(0));
+    Assertions.assertEquals(dnsInOrder.get(0), pipeline.getClosestNode());
+    Assertions.assertEquals(dns.get(0), pipeline.getFirstNode());
+    Assertions.assertNotEquals(dns.get(0), dnsInOrder.get(0));
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(5)
   public void testRandomFirstNodeIsCommandTarget() throws IOException {
     final ArrayList<DatanodeDetails> allDNs = new ArrayList<>(dns);
     // Using a new Xceiver Client, call it repeatedly until all DNs in the
@@ -124,7 +126,7 @@
       };
       invokeXceiverClientGetBlock(client);
     }
-    Assert.assertEquals(1, seenDNs.size());
+    Assertions.assertEquals(1, seenDNs.size());
   }
 
   @Test
@@ -146,7 +148,7 @@
       invokeXceiverClientGetBlock(client);
       invokeXceiverClientReadChunk(client);
       invokeXceiverClientReadSmallFile(client);
-      Assert.assertEquals(1, seenDNs.size());
+      Assertions.assertEquals(1, seenDNs.size());
     }
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 0a349d0..ae9cb88 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -32,13 +32,12 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
 import java.io.IOException;
 import java.util.UUID;
 
@@ -47,22 +46,15 @@
 /**
  * Test for XceiverClientManager caching and eviction.
  */
+@Timeout(300)
 public class TestXceiverClientManager {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
   private static OzoneConfiguration config;
   private static MiniOzoneCluster cluster;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     config = new OzoneConfiguration();
     cluster = MiniOzoneCluster.newBuilder(config)
@@ -73,7 +65,7 @@
         .getStorageContainerLocationClient();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -97,7 +89,7 @@
             OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, client1.getRefcount());
+    Assertions.assertEquals(1, client1.getRefcount());
 
     ContainerWithPipeline container2 = storageContainerLocationClient
         .allocateContainer(
@@ -106,13 +98,13 @@
             OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
-    Assert.assertEquals(1, client2.getRefcount());
+    Assertions.assertEquals(1, client2.getRefcount());
 
     XceiverClientSpi client3 = clientManager
         .acquireClient(container1.getPipeline());
-    Assert.assertEquals(2, client3.getRefcount());
-    Assert.assertEquals(2, client1.getRefcount());
-    Assert.assertEquals(client1, client3);
+    Assertions.assertEquals(2, client3.getRefcount());
+    Assertions.assertEquals(2, client1.getRefcount());
+    Assertions.assertEquals(client1, client3);
     clientManager.releaseClient(client1, false);
     clientManager.releaseClient(client2, false);
     clientManager.releaseClient(client3, false);
@@ -138,8 +130,8 @@
             OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertEquals(container1.getPipeline(),
+    Assertions.assertEquals(1, client1.getRefcount());
+    Assertions.assertEquals(container1.getPipeline(),
         client1.getPipeline());
 
     ContainerWithPipeline container2 =
@@ -149,14 +141,14 @@
             OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
-    Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertNotEquals(client1, client2);
+    Assertions.assertEquals(1, client2.getRefcount());
+    Assertions.assertNotEquals(client1, client2);
 
     // least recent container (i.e containerName1) is evicted
     XceiverClientSpi nonExistent1 = cache.getIfPresent(
         container1.getContainerInfo().getPipelineID().getId().toString()
             + container1.getContainerInfo().getReplicationType());
-    Assert.assertEquals(null, nonExistent1);
+    Assertions.assertEquals(null, nonExistent1);
     // However container call should succeed because of refcount on the client.
     ContainerProtocolCalls.createContainer(client1,
         container1.getContainerInfo().getContainerID(), null);
@@ -165,16 +157,13 @@
     // and any container operations should fail
     clientManager.releaseClient(client1, false);
 
-    String expectedMessage = "This channel is not connected.";
-    try {
-      ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerInfo().getContainerID(), null);
-      Assert.fail("Create container should throw exception on closed"
-          + "client");
-    } catch (Exception e) {
-      Assert.assertEquals(e.getClass(), IOException.class);
-      Assert.assertTrue(e.getMessage().contains(expectedMessage));
-    }
+    // Create container should throw exception on closed client
+    Throwable t = Assertions.assertThrows(IOException.class,
+        () -> ContainerProtocolCalls.createContainer(client1,
+            container1.getContainerInfo().getContainerID(), null));
+    Assertions.assertTrue(
+        t.getMessage().contains("This channel is not connected"));
+
     clientManager.releaseClient(client2, false);
   }
 
@@ -198,10 +187,10 @@
             OzoneConsts.OZONE);
     XceiverClientSpi client1 = clientManager
         .acquireClient(container1.getPipeline());
-    Assert.assertEquals(1, client1.getRefcount());
+    Assertions.assertEquals(1, client1.getRefcount());
 
     clientManager.releaseClient(client1, false);
-    Assert.assertEquals(0, client1.getRefcount());
+    Assertions.assertEquals(0, client1.getRefcount());
 
     ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
@@ -210,26 +199,22 @@
             OzoneConsts.OZONE);
     XceiverClientSpi client2 = clientManager
         .acquireClient(container2.getPipeline());
-    Assert.assertEquals(1, client2.getRefcount());
-    Assert.assertNotEquals(client1, client2);
+    Assertions.assertEquals(1, client2.getRefcount());
+    Assertions.assertNotEquals(client1, client2);
 
     // now client 1 should be evicted
     XceiverClientSpi nonExistent = cache.getIfPresent(
         container1.getContainerInfo().getPipelineID().getId().toString()
             + container1.getContainerInfo().getReplicationType());
-    Assert.assertEquals(null, nonExistent);
+    Assertions.assertEquals(null, nonExistent);
 
     // Any container operation should now fail
-    String expectedMessage = "This channel is not connected.";
-    try {
-      ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerInfo().getContainerID(), null);
-      Assert.fail("Create container should throw exception on closed"
-          + "client");
-    } catch (Exception e) {
-      Assert.assertEquals(e.getClass(), IOException.class);
-      Assert.assertTrue(e.getMessage().contains(expectedMessage));
-    }
+    Throwable t = Assertions.assertThrows(IOException.class,
+        () -> ContainerProtocolCalls.createContainer(client1,
+            container1.getContainerInfo().getContainerID(), null));
+    Assertions.assertTrue(
+        t.getMessage().contains("This channel is not connected"));
+
     clientManager.releaseClient(client2, false);
   }
 
@@ -252,25 +237,25 @@
     XceiverClientSpi client1 =
         clientManager.acquireClient(container1.getPipeline());
     clientManager.acquireClient(container1.getPipeline());
-    Assert.assertEquals(2, client1.getRefcount());
+    Assertions.assertEquals(2, client1.getRefcount());
 
     // client should be invalidated in the cache
     clientManager.releaseClient(client1, true);
-    Assert.assertEquals(1, client1.getRefcount());
-    Assert.assertNull(cache.getIfPresent(
+    Assertions.assertEquals(1, client1.getRefcount());
+    Assertions.assertNull(cache.getIfPresent(
         container1.getContainerInfo().getPipelineID().getId().toString()
             + container1.getContainerInfo().getReplicationType()));
 
     // new client should be added in cache
     XceiverClientSpi client2 =
         clientManager.acquireClient(container1.getPipeline());
-    Assert.assertNotEquals(client1, client2);
-    Assert.assertEquals(1, client2.getRefcount());
+    Assertions.assertNotEquals(client1, client2);
+    Assertions.assertEquals(1, client2.getRefcount());
 
     // on releasing the old client the entry in cache should not be invalidated
     clientManager.releaseClient(client1, true);
-    Assert.assertEquals(0, client1.getRefcount());
-    Assert.assertNotNull(cache.getIfPresent(
+    Assertions.assertEquals(0, client1.getRefcount());
+    Assertions.assertNotNull(cache.getIfPresent(
         container1.getContainerInfo().getPipelineID().getId().toString()
             + container1.getContainerInfo().getReplicationType()));
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index 967c0b9..b07474c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -46,22 +46,17 @@
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * This class tests the metrics of XceiverClient.
  */
+@Timeout(300)
 public class TestXceiverClientMetrics {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
   // only for testing
   private volatile boolean breakFlag;
   private CountDownLatch latch;
@@ -71,7 +66,7 @@
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     config = new OzoneConfiguration();
     cluster = MiniOzoneCluster.newBuilder(config).build();
@@ -80,7 +75,7 @@
         .getStorageContainerLocationClient();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     cluster.shutdown();
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
index 2736c99..b4631c2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
@@ -28,13 +28,11 @@
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -65,6 +63,7 @@
 import java.util.stream.Collectors;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
@@ -83,7 +82,6 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.fail;
 
 /**
  * Test from the scmclient for decommission and maintenance.
@@ -633,13 +631,8 @@
    * @return
    */
   private NodeStatus getNodeStatus(DatanodeDetails dn) {
-    NodeStatus status = null;
-    try {
-      status = nm.getNodeStatus(dn);
-    } catch (NodeNotFoundException e) {
-      fail("Unexpected exception getting the nodeState");
-    }
-    return status;
+    return assertDoesNotThrow(() -> nm.getNodeStatus(dn),
+        "Unexpected exception getting the nodeState");
   }
 
   /**
@@ -650,13 +643,8 @@
    * @return
    */
   private Set<ContainerReplica> getContainerReplicas(ContainerInfo c) {
-    Set<ContainerReplica> replicas = null;
-    try {
-      replicas = cm.getContainerReplicas(c.containerID());
-    } catch (ContainerNotFoundException e) {
-      fail("Unexpected ContainerNotFoundException");
-    }
-    return replicas;
+    return assertDoesNotThrow(() -> cm.getContainerReplicas(c.containerID()),
+        "Unexpected exception getting the container replicas");
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
index 56a61d1..9a71280 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
@@ -25,10 +25,11 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -62,19 +63,19 @@
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test Query Node Operation.
  */
-@Ignore
+@Disabled
 public class TestQueryNode {
   private static int numOfDatanodes = 5;
   private MiniOzoneCluster cluster;
 
   private ContainerOperationClient scmClient;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     final int interval = 100;
@@ -98,7 +99,7 @@
     scmClient = new ContainerOperationClient(conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -109,11 +110,11 @@
   public void testHealthyNodesCount() throws Exception {
     List<HddsProtos.Node> nodes = scmClient.queryNode(null, HEALTHY,
         HddsProtos.QueryScope.CLUSTER, "");
-    assertEquals("Expected  live nodes", numOfDatanodes,
-        nodes.size());
+    assertEquals(numOfDatanodes, nodes.size(), "Expected live nodes");
   }
 
-  @Test(timeout = 10 * 1000L)
+  @Test
+  @Timeout(10)
   public void testStaleNodesCount() throws Exception {
     cluster.shutdownHddsDatanode(0);
     cluster.shutdownHddsDatanode(1);
@@ -124,7 +125,7 @@
 
     int nodeCount = scmClient.queryNode(null, STALE,
         HddsProtos.QueryScope.CLUSTER, "").size();
-    assertEquals("Mismatch of expected nodes count", 2, nodeCount);
+    assertEquals(2, nodeCount, "Mismatch of expected nodes count");
 
     GenericTestUtils.waitFor(() ->
             cluster.getStorageContainerManager().getNodeCount(DEAD) == 2,
@@ -133,12 +134,12 @@
     // Assert that we don't find any stale nodes.
     nodeCount = scmClient.queryNode(null, STALE,
         HddsProtos.QueryScope.CLUSTER, "").size();
-    assertEquals("Mismatch of expected nodes count", 0, nodeCount);
+    assertEquals(0, nodeCount, "Mismatch of expected nodes count");
 
     // Assert that we find the expected number of dead nodes.
     nodeCount = scmClient.queryNode(null, DEAD,
         HddsProtos.QueryScope.CLUSTER, "").size();
-    assertEquals("Mismatch of expected nodes count", 2, nodeCount);
+    assertEquals(2, nodeCount, "Mismatch of expected nodes count");
   }
 
   @Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
index 6b27cc9..69233aa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
@@ -20,9 +20,10 @@
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -34,27 +35,20 @@
 import java.util.Map;
 import java.util.concurrent.TimeoutException;
 
-import org.junit.Rule;
-import org.junit.rules.Timeout;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test cases to verify the metrics exposed by SCMPipelineManager via MXBean.
  */
+@Timeout(300)
 public class TestPipelineManagerMXBean {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private MiniOzoneCluster cluster;
   private MBeanServer mbs;
 
-  @Before
+  @BeforeEach
   public void init()
       throws IOException, TimeoutException, InterruptedException {
     OzoneConfiguration conf = new OzoneConfiguration();
@@ -79,11 +73,10 @@
     verifyEquals(data, datanodeInfo);
   }
 
-  private void verifyEquals(TabularData actualData, Map<String, Integer>
-      expectedData) {
-    if (actualData == null || expectedData == null) {
-      fail("Data should not be null.");
-    }
+  private void verifyEquals(TabularData actualData,
+      Map<String, Integer> expectedData) {
+    assertNotNull(actualData);
+    assertNotNull(expectedData);
     for (Object obj : actualData.values()) {
       assertTrue(obj instanceof CompositeData);
       CompositeData cds = (CompositeData) obj;
@@ -98,7 +91,7 @@
     assertTrue(expectedData.isEmpty());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     cluster.shutdown();
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
index 90529cd..b36a681 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
@@ -35,18 +35,17 @@
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.util.HashMap;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
@@ -58,18 +57,13 @@
 /**
  * Test cases to verify the SCM pipeline bytesWritten metrics.
  */
+@Timeout(300)
 public class TestSCMPipelineBytesWrittenMetrics {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private MiniOzoneCluster cluster;
   private OzoneConfiguration conf;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
@@ -109,8 +103,8 @@
         .setKeyName(keyName).setRefreshPipeline(true);
 
     OzoneKeyDetails keyDetails = bucket.getKey(keyName);
-    Assert.assertEquals(keyName, keyDetails.getName());
-    Assert.assertEquals(value.getBytes(UTF_8).length, keyDetails
+    Assertions.assertEquals(keyName, keyDetails.getName());
+    Assertions.assertEquals(value.getBytes(UTF_8).length, keyDetails
         .getOzoneKeyLocations().get(0).getLength());
   }
 
@@ -129,7 +123,7 @@
     List<Pipeline> pipelines = cluster.getStorageContainerManager()
         .getPipelineManager().getPipelines();
 
-    Assert.assertEquals(1, pipelines.size());
+    Assertions.assertEquals(1, pipelines.size());
     Pipeline pipeline = pipelines.get(0);
 
     final String metricName =
@@ -141,7 +135,7 @@
     }, 500, 300000);
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     cluster.shutdown();
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
index ed323ec..52bd98d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
@@ -30,17 +30,16 @@
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.IOException;
 import java.util.Optional;
 import java.util.concurrent.TimeoutException;
 
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
@@ -48,17 +47,12 @@
 /**
  * Test cases to verify the metrics exposed by SCMPipelineManager.
  */
+@Timeout(300)
 public class TestSCMPipelineMetrics {
 
-  /**
-    * Set a timeout for each test.
-    */
-  @Rule
-  public Timeout timeout = Timeout.seconds(300);
-
   private MiniOzoneCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
@@ -79,7 +73,7 @@
     long numPipelineCreated =
         getLongCounter("NumPipelineCreated", metrics);
     // Pipelines are created in background when the cluster starts.
-    Assert.assertTrue(numPipelineCreated > 0);
+    Assertions.assertTrue(numPipelineCreated > 0);
   }
 
   /**
@@ -91,15 +85,11 @@
         .getStorageContainerManager().getPipelineManager();
     Optional<Pipeline> pipeline = pipelineManager
         .getPipelines().stream().findFirst();
-    Assert.assertTrue(pipeline.isPresent());
-    try {
-      cluster.getStorageContainerManager()
-          .getPipelineManager()
-          .closePipeline(pipeline.get(), false);
-    } catch (IOException | TimeoutException e) {
-      e.printStackTrace();
-      Assert.fail();
-    }
+    Assertions.assertTrue(pipeline.isPresent());
+    Assertions.assertDoesNotThrow(() ->
+        cluster.getStorageContainerManager()
+            .getPipelineManager()
+            .closePipeline(pipeline.get(), false));
     MetricsRecordBuilder metrics = getMetrics(
         SCMPipelineMetrics.class.getSimpleName());
     assertCounter("NumPipelineDestroyed", 1L, metrics);
@@ -117,29 +107,24 @@
     Pipeline pipeline = block.getPipeline();
     long numBlocksAllocated = getLongCounter(
         SCMPipelineMetrics.getBlockAllocationMetricName(pipeline), metrics);
-    Assert.assertEquals(numBlocksAllocated, 1);
+    Assertions.assertEquals(numBlocksAllocated, 1);
 
     // destroy the pipeline
-    try {
-      cluster.getStorageContainerManager().getClientProtocolServer()
-          .closePipeline(pipeline.getId().getProtobuf());
-    } catch (IOException e) {
-      e.printStackTrace();
-      Assert.fail();
-    }
-    metrics = getMetrics(SCMPipelineMetrics.class.getSimpleName());
-    try {
-      getLongCounter(SCMPipelineMetrics.getBlockAllocationMetricName(pipeline),
-          metrics);
-      Assert.fail("Metric should not be present for closed pipeline.");
-    } catch (AssertionError e) {
-      Assert.assertTrue(e.getMessage().contains(
-          "Expected exactly one metric for name " + SCMPipelineMetrics
-              .getBlockAllocationMetricName(block.getPipeline())));
-    }
+    Assertions.assertDoesNotThrow(() ->
+        cluster.getStorageContainerManager().getClientProtocolServer()
+            .closePipeline(pipeline.getId().getProtobuf()));
+
+    MetricsRecordBuilder finalMetrics =
+        getMetrics(SCMPipelineMetrics.class.getSimpleName());
+    Throwable t = Assertions.assertThrows(AssertionError.class, () ->
+        getLongCounter(SCMPipelineMetrics
+            .getBlockAllocationMetricName(pipeline), finalMetrics));
+    Assertions.assertTrue(t.getMessage().contains(
+        "Expected exactly one metric for name " + SCMPipelineMetrics
+            .getBlockAllocationMetricName(block.getPipeline())));
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     cluster.shutdown();
   }