Merge branch 'trunk' into rewrite-junit5-hdfs
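
This merge brings the JUnit 4 to JUnit 5 (Jupiter) test migration for
hadoop-hdfs up to date with trunk. The bulk of the test changes below
were generated with the OpenRewrite JUnit5BestPractices recipe that the
pom now configures; the pom also excludes the transitive junit and
junit-vintage-engine artifacts so JUnit 4 classes cannot leak back onto
the test classpath.

As a rough sketch (assuming the rewrite-maven-plugin configuration added
to the pom below; module path and goal not verified against this exact
tree), the migration can be re-run with:

    mvn -pl hadoop-hdfs-project/hadoop-hdfs rewrite:run
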
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 10d66d0..b05292c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -61,11 +61,27 @@
       <artifactId>zookeeper</artifactId>
       <type>test-jar</type>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.xerial.snappy</groupId>
@@ -154,12 +170,6 @@
         </exclusion>
       </exclusions>
     </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minikdc</artifactId>
@@ -169,11 +179,23 @@
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
@@ -184,6 +206,16 @@
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
       <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -209,11 +241,27 @@
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
         <groupId>org.assertj</groupId>
@@ -447,6 +495,23 @@
           </filesets>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.openrewrite.maven</groupId>
+        <artifactId>rewrite-maven-plugin</artifactId>
+        <version>4.9.0</version>
+        <configuration>
+          <activeRecipes>
+            <recipe>org.openrewrite.java.testing.junit5.JUnit5BestPractices</recipe>
+          </activeRecipes>
+        </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>org.openrewrite.recipe</groupId>
+            <artifactId>rewrite-testing-frameworks</artifactId>
+            <version>1.7.1</version>
+          </dependency>
+        </dependencies>
+      </plugin>
     </plugins>
   </build>
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
index dcd91c7..ad168b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
@@ -18,10 +18,8 @@
 
 package org.apache.hadoop;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -31,11 +29,7 @@
 
 import org.apache.hadoop.ipc.RefreshRegistry;
 import org.apache.hadoop.ipc.RefreshResponse;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
+import org.junit.jupiter.api.*;
 import org.mockito.Mockito;
 
 /**
@@ -51,7 +45,7 @@
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpBeforeClass() throws Exception {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
@@ -61,14 +55,14 @@
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownBeforeClass() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Register Handlers, first one just sends an ok response
     firstHandler = Mockito.mock(RefreshHandler.class);
@@ -85,7 +79,7 @@
     RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
     RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
@@ -96,7 +90,7 @@
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refresh", "nn"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
+      assertEquals(-1, exitCode, "DFSAdmin should fail due to bad args");
   }
 
   @Test
@@ -105,7 +99,7 @@
     String [] args = new String[]{"-refresh", "localhost:" + 
         cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
+      assertEquals(-1, exitCode, "DFSAdmin should fail due to no handler registered");
   }
 
   @Test
@@ -114,7 +108,7 @@
     String[] args = new String[]{"-refresh",
         "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should succeed", 0, exitCode);
+      assertEquals(0, exitCode, "DFSAdmin should succeed");
 
     Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
     // Second handler was never called
@@ -128,11 +122,11 @@
     String[] args = new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return 2", 2, exitCode);
+    assertEquals(2, exitCode, "DFSAdmin should return 2");
 
     exitCode = admin.run(new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "secondHandler", "one", "two"});
-    assertEquals("DFSAdmin should now return 3", 3, exitCode);
+    assertEquals(3, exitCode, "DFSAdmin should now return 3");
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
@@ -147,7 +141,7 @@
     String[] args = new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return -1", -1, exitCode);
+      assertEquals(-1, exitCode, "DFSAdmin should return -1");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
index e21a5a3..c0fc178 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
@@ -18,10 +18,7 @@
 
 package org.apache.hadoop;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.BindException;
@@ -40,8 +37,8 @@
 import org.apache.hadoop.ipc.FairCallQueue;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 public class TestRefreshCallQueue {
   private MiniDFSCluster cluster;
@@ -77,7 +74,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -115,9 +112,9 @@
     mockQueuePuts = 0;
     setUp(MockCallQueue.class);
 
-    assertTrue("Mock queue should have been constructed",
-        mockQueueConstructions > 0);
-    assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
+    assertTrue(mockQueueConstructions > 0,
+        "Mock queue should have been constructed");
+    assertTrue(canPutInMockQueue(), "Puts are routed through MockQueue");
     int lastMockQueueConstructions = mockQueueConstructions;
 
     // Replace queue with the queue specified in core-site.xml, which would be
@@ -125,13 +122,13 @@
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refreshCallQueue"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return 0", 0, exitCode);
+      assertEquals(0, exitCode, "DFSAdmin should return 0");
 
-    assertEquals("Mock queue should have no additional constructions",
-        lastMockQueueConstructions, mockQueueConstructions);
+    assertEquals(lastMockQueueConstructions, mockQueueConstructions,
+        "Mock queue should have no additional constructions");
     try {
-      assertFalse("Puts are routed through LBQ instead of MockQueue",
-          canPutInMockQueue());
+      assertFalse(canPutInMockQueue(),
+          "Puts are routed through LBQ instead of MockQueue");
     } catch (IOException ioe) {
       fail("Could not put into queue at all");
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 9cf2180..10dfa93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -22,9 +22,9 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestAclCLI extends CLITestHelperDFS {
   private MiniDFSCluster cluster = null;
@@ -38,7 +38,7 @@
         DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY, false);
   }
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -49,7 +49,7 @@
     username = System.getProperty("user.name");
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
index ec31766..f05c8d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.cli;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
+import org.junit.jupiter.api.Test;
 
-import org.junit.Test;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
 
 /**
  * Test ACL CLI with POSIX ACL inheritance enabled.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
index 2f8dfa5..fb16c38c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,9 +37,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.CacheAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.SAXException;
 
 public class TestCacheAdminCLI extends CLITestHelper {
@@ -51,7 +51,7 @@
   protected FileSystem fs = null;
   protected String namenode = null;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -68,11 +68,11 @@
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not a HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index afc668c..ee9e2e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -23,7 +23,7 @@
 import java.security.NoSuchAlgorithmException;
 import java.util.UUID;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CLICommandCryptoAdmin;
@@ -45,9 +45,9 @@
 import org.apache.hadoop.hdfs.tools.CryptoAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.SAXException;
 
 public class TestCryptoAdminCLI extends CLITestHelperDFS {
@@ -56,7 +56,7 @@
   protected String namenode = null;
   private static File tmpDir;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -78,11 +78,11 @@
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not an HDFS: " + fs.getUri(),
-        fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not an HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
index 4c27f79..3172bf2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -27,16 +27,16 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDeleteCLI extends CLITestHelperDFS {
   protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs = null;
   protected String namenode = null;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -49,11 +49,11 @@
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not an HDFS: " + fs.getUri(),
-        fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not an HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
index 566755d..c09d184 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
@@ -24,10 +24,10 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.xml.sax.SAXException;
 
@@ -40,7 +40,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -62,7 +62,7 @@
     return "testErasureCodingConf.xml";
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
index e0e7894..f8ef3a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,9 +28,9 @@
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestHDFSCLI extends CLITestHelperDFS {
 
@@ -38,7 +38,7 @@
   protected FileSystem fs = null;
   protected String namenode = null;
   
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -63,8 +63,8 @@
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not a HDFS: " + fs.getUri());
   }
 
   @Override
@@ -72,7 +72,7 @@
     return "testHDFSConf.xml";
   }
   
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
index d83baf3..793d9de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,16 +28,16 @@
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestXAttrCLI  extends CLITestHelperDFS {
   protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs = null;
   protected String namenode = null;
   
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -53,8 +53,8 @@
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(), 
-        fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not a HDFS: " + fs.getUri());
   }
 
   @Override
@@ -62,7 +62,7 @@
     return "testXAttrConf.xml";
   }
   
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index 99b1ddbb..4f658d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -65,12 +65,11 @@
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import java.util.function.Supplier;
 
@@ -85,7 +84,7 @@
 
   static private CacheManipulator prevCacheManipulator;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
@@ -99,7 +98,7 @@
     });
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardown() {
     // Restore the original CacheManipulator
     NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
@@ -116,8 +115,8 @@
       (int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
   
   public static HdfsConfiguration initZeroCopyTest() {
-    Assume.assumeTrue(NativeIO.isAvailable());
-    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
+    Assumptions.assumeTrue(NativeIO.isAvailable());
+    Assumptions.assumeTrue(SystemUtils.IS_OS_UNIX);
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -152,10 +151,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -165,13 +164,13 @@
       fsIn = fs.open(TEST_PATH);
       ByteBuffer result = fsIn.read(null, BLOCK_SIZE,
           EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
       HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
           byteBufferToArray(result));
       fsIn.releaseBuffer(result);
     } finally {
@@ -198,10 +197,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -214,20 +213,20 @@
       HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
       ByteBuffer result =
         dfsIn.read(null, 2 * BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
           byteBufferToArray(result));
       dfsIn.releaseBuffer(result);
       
       // Try to read (1 + ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
       result = 
           dfsIn.read(null, 1 + BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
           byteBufferToArray(result));
       dfsIn.releaseBuffer(result);
     } finally {
@@ -255,10 +254,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -270,17 +269,17 @@
       ByteBuffer result;
       try {
         result = dfsIn.read(null, BLOCK_SIZE + 1, EnumSet.noneOf(ReadOption.class));
-        Assert.fail("expected UnsupportedOperationException");
+        Assertions.fail("expected UnsupportedOperationException");
       } catch (UnsupportedOperationException e) {
         // expected
       }
       result = dfsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
           byteBufferToArray(result));
     } finally {
       if (fsIn != null) fsIn.close();
@@ -311,16 +310,16 @@
         LinkedMap evictable,
         LinkedMap evictableMmapped) {
       if (expectedNumOutstandingMmaps >= 0) {
-        Assert.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
+        Assertions.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
       }
       if (expectedNumReplicas >= 0) {
-        Assert.assertEquals(expectedNumReplicas, replicas.size());
+        Assertions.assertEquals(expectedNumReplicas, replicas.size());
       }
       if (expectedNumEvictable >= 0) {
-        Assert.assertEquals(expectedNumEvictable, evictable.size());
+        Assertions.assertEquals(expectedNumEvictable, evictable.size());
       }
       if (expectedNumMmapedEvictable >= 0) {
-        Assert.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
+        Assertions.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
       }
     }
   }
@@ -346,10 +345,10 @@
     try {
       DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
     } catch (InterruptedException e) {
-      Assert.fail("unexpected InterruptedException during " +
+      Assertions.fail("unexpected InterruptedException during " +
           "waitReplication: " + e);
     } catch (TimeoutException e) {
-      Assert.fail("unexpected TimeoutException during " +
+      Assertions.fail("unexpected TimeoutException during " +
           "waitReplication: " + e);
     }
     fsIn = fs.open(TEST_PATH);
@@ -378,10 +377,10 @@
           LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             new ExtendedBlockId(firstBlock.getBlockId(), firstBlock.getBlockPoolId()));
-        Assert.assertNotNull(replica);
-        Assert.assertTrue(replica.hasMmap());
+        Assertions.assertNotNull(replica);
+        Assertions.assertTrue(replica.hasMmap());
         // The replica should not yet be evictable, since we have it open.
-        Assert.assertNull(replica.getEvictableTimeNs());
+        Assertions.assertNull(replica.getEvictableTimeNs());
       }
     });
 
@@ -449,10 +448,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -493,22 +492,22 @@
             stream instanceof ByteBufferReadable);
 
     ByteBuffer result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
-    Assert.assertEquals(10, result.remaining());
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
+    Assertions.assertEquals(10, result.remaining());
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
         byteBufferToArray(result));
 
     result = ByteBufferUtil.fallbackRead(stream, bufferPool, 5000);
-    Assert.assertEquals(5000, result.remaining());
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
+    Assertions.assertEquals(5000, result.remaining());
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
         byteBufferToArray(result));
 
     result = ByteBufferUtil.fallbackRead(stream, bufferPool, 9999999);
-    Assert.assertEquals(11375, result.remaining());
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
+    Assertions.assertEquals(11375, result.remaining());
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
         byteBufferToArray(result));
 
     result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
-    Assert.assertNull(result);
+    Assertions.assertNull(result);
   }
 
   /**
@@ -533,10 +532,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -618,7 +617,7 @@
     try {
       result = fsIn.read(null, TEST_FILE_LENGTH / 2,
           EnumSet.noneOf(ReadOption.class));
-      Assert.fail("expected UnsupportedOperationException");
+      Assertions.fail("expected UnsupportedOperationException");
     } catch (UnsupportedOperationException e) {
       // expected
     }
@@ -637,9 +636,9 @@
       result = fsIn.read(null, TEST_FILE_LENGTH,
           EnumSet.noneOf(ReadOption.class));
     } catch (UnsupportedOperationException e) {
-      Assert.fail("expected to be able to read cached file via zero-copy");
+      Assertions.fail("expected to be able to read cached file via zero-copy");
     }
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0,
         BLOCK_SIZE), byteBufferToArray(result));
     // Test that files opened after the cache operation has finished
     // still get the benefits of zero-copy (regression test for HDFS-6086)
@@ -648,9 +647,9 @@
       result2 = fsIn2.read(null, TEST_FILE_LENGTH,
           EnumSet.noneOf(ReadOption.class));
     } catch (UnsupportedOperationException e) {
-      Assert.fail("expected to be able to read cached file via zero-copy");
+      Assertions.fail("expected to be able to read cached file via zero-copy");
     }
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0,
         BLOCK_SIZE), byteBufferToArray(result2));
     fsIn2.releaseBuffer(result2);
     fsIn2.close();
@@ -688,10 +687,10 @@
               Map<ExtendedBlockId, InvalidToken> failedLoads,
               LinkedMap evictable,
               LinkedMap evictableMmapped) {
-            Assert.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
+            Assertions.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
             ShortCircuitReplica replica =
                 replicas.get(ExtendedBlockId.fromExtendedBlock(block));
-            Assert.assertNotNull(replica);
+            Assertions.assertNotNull(replica);
             Slot slot = replica.getSlot();
             if ((expectedIsAnchorable != slot.isAnchorable()) ||
                 (expectedIsAnchored != slot.isAnchored())) {
@@ -734,7 +733,7 @@
       fsIn = fs.open(TEST_PATH);
       try {
         fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-        Assert.fail("expected zero-copy read to fail when client mmaps " +
+        Assertions.fail("expected zero-copy read to fail when client mmaps " +
             "were disabled.");
       } catch (UnsupportedOperationException e) {
       }
@@ -764,7 +763,7 @@
       // Test EOF behavior
       IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
       buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(null, buf);
+      Assertions.assertEquals(null, buf);
     } finally {
       if (fsIn != null) fsIn.close();
       if (fs != null) fs.close();
@@ -774,7 +773,7 @@
   
   @Test
   public void test2GBMmapLimit() throws Exception {
-    Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
+    Assumptions.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
     HdfsConfiguration conf = initZeroCopyTest();
     final long TEST_FILE_LENGTH = 2469605888L;
     conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
@@ -795,20 +794,20 @@
       
       fsIn = fs.open(TEST_PATH);
       buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(1, buf1.remaining());
+      Assertions.assertEquals(1, buf1.remaining());
       fsIn.releaseBuffer(buf1);
       buf1 = null;
       fsIn.seek(2147483640L);
       buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(7, buf1.remaining());
-      Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
+      Assertions.assertEquals(7, buf1.remaining());
+      Assertions.assertEquals(Integer.MAX_VALUE, buf1.limit());
       fsIn.releaseBuffer(buf1);
       buf1 = null;
-      Assert.assertEquals(2147483647L, fsIn.getPos());
+      Assertions.assertEquals(2147483647L, fsIn.getPos());
       try {
         buf1 = fsIn.read(null, 1024,
             EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-        Assert.fail("expected UnsupportedOperationException");
+        Assertions.fail("expected UnsupportedOperationException");
       } catch (UnsupportedOperationException e) {
         // expected; can't read past 2GB boundary.
       }
@@ -825,13 +824,13 @@
       fsIn2 = fs.open(TEST_PATH2);
       fsIn2.seek(2147483640L);
       buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(8, buf2.remaining());
-      Assert.assertEquals(2147483648L, fsIn2.getPos());
+      Assertions.assertEquals(8, buf2.remaining());
+      Assertions.assertEquals(2147483648L, fsIn2.getPos());
       fsIn2.releaseBuffer(buf2);
       buf2 = null;
       buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(1024, buf2.remaining());
-      Assert.assertEquals(2147484672L, fsIn2.getPos());
+      Assertions.assertEquals(1024, buf2.remaining());
+      Assertions.assertEquals(2147484672L, fsIn2.getPos());
       fsIn2.releaseBuffer(buf2);
       buf2 = null;
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
index 0d3e6ff..2d2cb4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
@@ -27,10 +27,10 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestFcHdfsCreateMkdir extends
                     FileContextCreateMkdirBaseTest {
@@ -44,7 +44,7 @@
   }
 
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
@@ -56,7 +56,7 @@
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -64,13 +64,13 @@
   }
   
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
   }
   
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
index 10ae1ef4..5ff51a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
@@ -27,10 +27,10 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestFcHdfsPermission extends FileContextPermissionBase {
   
@@ -51,7 +51,7 @@
     return fc;
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
@@ -63,7 +63,7 @@
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -71,13 +71,13 @@
   }
   
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
   }
   
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
index eef2235..cbd8489 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
@@ -30,12 +30,13 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestFcHdfsSetUMask {
   
@@ -78,7 +79,7 @@
   private static final FsPermission WIDE_OPEN_TEST_UMASK = FsPermission
       .createImmutable((short) (0777 ^ 0777));
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining()
         throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
@@ -91,20 +92,20 @@
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fc.setUMask(WIDE_OPEN_TEST_UMASK);
     fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
   }
@@ -194,8 +195,8 @@
     fc.setUMask(umask);
     fc.mkdir(f, FileContext.DEFAULT_PERM, true);
     Assert.assertTrue(isDir(fc, f));
-    Assert.assertEquals("permissions on directory are wrong",  
-        expectedPerms, fc.getFileStatus(f).getPermission());
+    Assertions.assertEquals(expectedPerms, fc.getFileStatus(f).getPermission(),
+        "permissions on directory are wrong");
   }
   
   public void testMkdirRecursiveWithNonExistingDir(FsPermission umask,
@@ -205,11 +206,11 @@
     fc.setUMask(umask);
     fc.mkdir(f, FileContext.DEFAULT_PERM, true);
     Assert.assertTrue(isDir(fc, f));
-    Assert.assertEquals("permissions on directory are wrong",  
-        expectedPerms, fc.getFileStatus(f).getPermission());
+    Assertions.assertEquals(expectedPerms, fc.getFileStatus(f).getPermission(),
+        "permissions on directory are wrong");
     Path fParent = fileContextTestHelper.getTestRootPath(fc, "NonExistant2");
-    Assert.assertEquals("permissions on parent directory are wrong",  
-        expectedParentPerms, fc.getFileStatus(fParent).getPermission());
+    Assertions.assertEquals(expectedParentPerms, fc.getFileStatus(fParent)
+        .getPermission(), "permissions on parent directory are wrong");
   }
 
 
@@ -219,8 +220,8 @@
     fc.setUMask(umask);
     createFile(fc, f);
     Assert.assertTrue(isFile(fc, f));
-    Assert.assertEquals("permissions on file are wrong",  
-        expectedPerms , fc.getFileStatus(f).getPermission());
+    Assertions.assertEquals(expectedPerms, fc.getFileStatus(f).getPermission(),
+        "permissions on file are wrong");
   }
   
   
@@ -233,10 +234,10 @@
     fc.setUMask(umask);
     createFile(fc, f);
     Assert.assertTrue(isFile(fc, f));
-    Assert.assertEquals("permissions on file are wrong",  
-        expectedFilePerms, fc.getFileStatus(f).getPermission());
-    Assert.assertEquals("permissions on parent directory are wrong",  
-        expectedDirPerms, fc.getFileStatus(fParent).getPermission());
+    Assertions.assertEquals(expectedFilePerms,
+        fc.getFileStatus(f).getPermission(), "permissions on file are wrong");
+    Assertions.assertEquals(expectedDirPerms, fc.getFileStatus(fParent)
+        .getPermission(), "permissions on parent directory are wrong");
   }
  
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
index f8adf01..9006052 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
@@ -17,15 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.*;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.UUID;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,7 +25,23 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.*;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.UUID;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestGlobPaths {
 
@@ -65,7 +72,7 @@
   static private String USER_DIR;
   private final Path[] path = new Path[NUM_OF_PATHS];
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     dfsCluster = new MiniDFSCluster.Builder(conf).build();
@@ -81,7 +88,7 @@
     USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
   }
   
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();
@@ -102,8 +109,8 @@
     fs.createNewFile(fNormal);
     fs.createNewFile(fWithCR);
     statuses = fs.globStatus(new Path(d1, "f1*"));
-    assertEquals("Expected both normal and CR-carrying files in result: ",
-        2, statuses.length);
+    assertEquals(2, statuses.length,
+        "Expected both normal and CR-carrying files in result: ");
     cleanupDFS();
   }
 
@@ -892,14 +899,14 @@
       // Test simple glob
       FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
           .toUri().getPath());
       // Test glob through symlink
       statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
           .toUri().getPath());
       // If the terminal path component in a globbed path is a symlink,
       // we don't dereference that link.
@@ -907,20 +914,20 @@
           + "/alphaLink/betaLink"), false);
       statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
           .toUri().getPath());
       // todo: test symlink-to-symlink-to-dir, etc.
     }
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksOnFS() throws Exception {
     testOnFileSystem(new TestGlobWithSymlinks(false));
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksOnFC() throws Exception {
     testOnFileContext(new TestGlobWithSymlinks(true));
@@ -951,20 +958,20 @@
       // Test glob through symlink to a symlink to a directory
       FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR
           + "/alphaLinkLink"), new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
           .toUri().getPath());
       statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
           .getPath().toUri().getPath());
       // Test glob of dangling symlink (theta does not actually exist)
       wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR
           + "/alpha/kappa"), false);
       statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"),
           new AcceptAllPathFilter());
-      Assert.assertNull(statuses);
+      Assertions.assertNull(statuses);
       // Test glob of symlinks
       wrap.createFile(USER_DIR + "/alpha/beta/gamma");
       wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR
@@ -975,12 +982,12 @@
           USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false);
       statuses = wrap.globStatus(new Path(USER_DIR
           + "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
           statuses[0].getPath().toUri().getPath());
       statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR
+      Assertions.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR
           + "/alpha/beta/gammaLink;" + USER_DIR + "/alpha/beta/gammaLinkLink;"
           + USER_DIR + "/alpha/beta/gammaLinkLinkLink",
           TestPath.mergeStatuses(statuses));
@@ -992,17 +999,17 @@
       statuses = wrap.globStatus(
           new Path(USER_DIR + "/tweedledee/unobtainium"),
           new AcceptAllPathFilter());
-      Assert.assertNull(statuses);
+      Assertions.assertNull(statuses);
     }
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksToSymlinksOnFS() throws Exception {
     testOnFileSystem(new TestGlobWithSymlinksToSymlinks(false));
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksToSymlinksOnFC() throws Exception {
     testOnFileContext(new TestGlobWithSymlinksToSymlinks(true));
@@ -1032,31 +1039,31 @@
       // PathFilter
       FileStatus statuses[] = wrap.globStatus(
           new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ());
-      Assert.assertNull(statuses);
+      Assertions.assertNull(statuses);
       statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"),
           new AcceptPathsEndingInZ());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
           .toUri().getPath());
       statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
           new AcceptPathsEndingInZ());
-      Assert.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR
+      Assertions.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR
           + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
       statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR
+      Assertions.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR
           + "/alpha/betaz;" + USER_DIR + "/alphaLinkz/beta;" + USER_DIR
           + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
     }
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception {
     testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter(false));
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception {
     testOnFileContext(new TestGlobSymlinksWithCustomPathFilter(true));
@@ -1078,22 +1085,22 @@
           + "/alphaLink"), false);
       FileStatus statuses[] = wrap.globStatus(
           new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
+      Assertions.assertEquals(1, statuses.length);
       Path path = statuses[0].getPath();
-      Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
-      Assert.assertEquals("hdfs", path.toUri().getScheme());
+      Assertions.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
+      Assertions.assertEquals("hdfs", path.toUri().getScheme());
 
       // FileContext can list a file:/// URI.
       // Since everyone should have the root directory, we list that.
       statuses = fc.util().globStatus(new Path("file:///"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
+      Assertions.assertEquals(1, statuses.length);
       Path filePath = statuses[0].getPath();
-      Assert.assertEquals("file", filePath.toUri().getScheme());
-      Assert.assertEquals("/", filePath.toUri().getPath());
+      Assertions.assertEquals("file", filePath.toUri().getScheme());
+      Assertions.assertEquals("/", filePath.toUri().getPath());
 
       // The FileSystem should have scheme 'hdfs'
-      Assert.assertEquals("hdfs", fs.getScheme());
+      Assertions.assertEquals("hdfs", fs.getScheme());
     }
   }
 
@@ -1176,16 +1183,15 @@
       try {
         wrap.globStatus(new Path("/no*/*"),
             new AcceptAllPathFilter());
-        Assert.fail("expected to get an AccessControlException when " +
+        Assertions.fail("expected to get an AccessControlException when " +
             "globbing through a directory we don't have permissions " +
             "to list.");
       } catch (AccessControlException ioe) {
       }
 
-      Assert.assertEquals("/norestrictions/val",
-        TestPath.mergeStatuses(wrap.globStatus(
-            new Path("/norestrictions/*"),
-                new AcceptAllPathFilter())));
+      Assertions.assertEquals("/norestrictions/val", TestPath.mergeStatuses(
+          wrap.globStatus(new Path("/norestrictions/*"),
+              new AcceptAllPathFilter())));
     }
   }
 
@@ -1209,9 +1215,8 @@
 
     void run() throws Exception {
       String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
-      Assert.assertEquals(reservedRoot,
-        TestPath.mergeStatuses(wrap.
-            globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
+      Assertions.assertEquals(reservedRoot, TestPath.mergeStatuses(
+          wrap.globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
     }
   }
 
@@ -1240,8 +1245,8 @@
       privWrap.setOwner(new Path("/"), newOwner, null);
       FileStatus[] status = 
           wrap.globStatus(rootPath, new AcceptAllPathFilter());
-      Assert.assertEquals(1, status.length);
-      Assert.assertEquals(newOwner, status[0].getOwner());
+      Assertions.assertEquals(1, status.length);
+      Assertions.assertEquals(newOwner, status[0].getOwner());
       privWrap.setOwner(new Path("/"), oldRootStatus.getOwner(), null);
     }
   }
@@ -1273,8 +1278,8 @@
         FileStatus[] statuses =
             wrap.globStatus(new Path("/filed*/alpha"),
                   new AcceptAllPathFilter());
-        Assert.assertEquals(1, statuses.length);
-        Assert.assertEquals("/filed_away/alpha", statuses[0].getPath()
+        Assertions.assertEquals(1, statuses.length);
+        Assertions.assertEquals("/filed_away/alpha", statuses[0].getPath()
             .toUri().getPath());
         privWrap.mkdir(new Path("/filed_away/alphabet"),
             new FsPermission((short)0777), true);
@@ -1282,8 +1287,8 @@
             new FsPermission((short)0777), true);
         statuses = wrap.globStatus(new Path("/filed*/alph*/*b*"),
                   new AcceptAllPathFilter());
-        Assert.assertEquals(1, statuses.length);
-        Assert.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
+        Assertions.assertEquals(1, statuses.length);
+        Assertions.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
             .toUri().getPath());
       } finally {
         privWrap.delete(new Path("/filed"), true);
@@ -1308,12 +1313,12 @@
     FileSystem fs = FileSystem.getLocal(conf);
     String localTmp = System.getProperty("java.io.tmpdir");
     Path base = new Path(new Path(localTmp), UUID.randomUUID().toString());
-    Assert.assertTrue(fs.mkdirs(base));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "e")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "c")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "a")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "d")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "b")));
+    Assertions.assertTrue(fs.mkdirs(base));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "e")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "c")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "a")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "d")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "b")));
     fs.deleteOnExit(base);
     FileStatus[] status = fs.globStatus(new Path(base, "*"));
     ArrayList list = new ArrayList();
@@ -1321,7 +1326,7 @@
         list.add(f.getPath().toString());
     }
     boolean sorted = Ordering.natural().isOrdered(list);
-    Assert.assertTrue(sorted);
+    Assertions.assertTrue(sorted);
   }
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index 8c37351..ed4a1d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -18,15 +18,6 @@
 
 package org.apache.hadoop.fs;
 
-import static org.apache.hadoop.fs.FileContextTestHelper.exists;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -36,12 +27,21 @@
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import static org.apache.hadoop.fs.FileContextTestHelper.exists;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestHDFSFileContextMainOperations extends
     FileContextMainOperationsBaseTest {
@@ -54,7 +54,7 @@
     return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
@@ -80,7 +80,7 @@
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -89,13 +89,13 @@
   }
   
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
   }
   
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -134,16 +134,16 @@
 
     boolean isReady = fc.truncate(file, newLength);
 
-    Assert.assertTrue("Recovery is not expected.", isReady);
+    Assertions.assertTrue(isReady, "Recovery is not expected.");
 
     FileStatus fileStatus = fc.getFileStatus(file);
-    Assert.assertEquals(fileStatus.getLen(), newLength);
+    Assertions.assertEquals(fileStatus.getLen(), newLength);
     AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
 
     ContentSummary cs = fs.getContentSummary(dir);
-    Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
-        newLength * repl);
-    Assert.assertTrue(fs.delete(dir, true));
+    Assertions.assertEquals(cs.getSpaceConsumed(),
+        newLength * repl, "Bad disk space usage");
+    Assertions.assertTrue(fs.delete(dir, true));
   }
 
   @Test
@@ -280,8 +280,8 @@
     fs = cluster.getFileSystem();
     src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
     dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
-    Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
-    Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
+    Assertions.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
+    Assertions.assertTrue(fs.exists(dst1));    // ensure rename dst exists
   }
   
   /**
@@ -309,8 +309,8 @@
     fs = cluster.getFileSystem();
     src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
     dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
-    Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
-    Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
+    Assertions.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
+    Assertions.assertTrue(fs.exists(dst1));    // ensure rename dst exists
   }
 
   @Test
@@ -324,7 +324,7 @@
 
     for (String invalidName: invalidNames) {
       Assert.assertFalse(invalidName + " is not valid",
-        fc.getDefaultFileSystem().isValidName(invalidName));
+          fc.getDefaultFileSystem().isValidName(invalidName));
     }
   }
 
@@ -332,9 +332,9 @@
       boolean exception) throws Exception {
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
+      Assertions.assertEquals(renameSucceeds, fs.rename(src, dst));
     } catch (Exception ex) {
-      Assert.assertTrue(exception);
+      Assertions.assertTrue(exception);
     }
     Assert.assertEquals(renameSucceeds, !exists(fc, src));
     Assert.assertEquals(renameSucceeds, exists(fc, dst));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
index 4ecca5e..da030d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.fs;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.junit.jupiter.api.Test;
 
 public class TestHdfsNativeCodeLoader {
   static final Logger LOG =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
index 05060af..bf4d4be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
@@ -19,14 +19,14 @@
 package org.apache.hadoop.fs;
 
 import java.io.File;
-import static org.junit.Assert.fail;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Set;
 
+import static org.junit.jupiter.api.Assertions.fail;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -39,10 +39,10 @@
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests whether FileContext can resolve an hdfs path that has a symlink to
@@ -53,7 +53,7 @@
   private static final FileContextTestHelper helper = new FileContextTestHelper();
   private static MiniDFSCluster cluster = null;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
@@ -63,7 +63,7 @@
 
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -100,11 +100,11 @@
 
     Set<AbstractFileSystem> afsList = fcHdfs
         .resolveAbstractFileSystems(alphaHdfsPathViaLink);
-    Assert.assertEquals(2, afsList.size());
+    Assertions.assertEquals(2, afsList.size());
     for (AbstractFileSystem afs : afsList) {
       if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
           && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
-        Assert.fail("Failed to resolve AFS correctly");
+        Assertions.fail("Failed to resolve AFS correctly");
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
index 84bd98b..c1f0c74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
@@ -25,8 +25,8 @@
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import javax.security.auth.login.LoginException;
 import java.io.File;
@@ -57,7 +57,7 @@
   protected static final byte[] data = getFileData(numBlocks,
       getDefaultBlockSize());
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning()
       throws IOException, LoginException, URISyntaxException {
 
@@ -104,7 +104,7 @@
     return webhdfsUrl;
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
index fd81a1e..2c60bf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -38,9 +38,9 @@
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.event.Level;
 
 /**
@@ -84,7 +84,7 @@
     return e;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void beforeClassSetup() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set(FsPermission.UMASK_LABEL, "000");
@@ -94,7 +94,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void afterClassTeardown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
index e5a5133..421c363 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
index 2a3e4c3..d5f9c99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
@@ -17,18 +17,18 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestSymlinkHdfsFileContext extends TestSymlinkHdfs {
 
   private static FileContext fc;
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetup() throws Exception {
     fc = FileContext.getFileContext(cluster.getURI(0));
     wrapper = new FileContextTestWrapper(fc, "/tmp/TestSymlinkHdfsFileContext");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
index fba9c42..3edaf4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
@@ -17,28 +17,27 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
-import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
 
 public class TestSymlinkHdfsFileSystem extends TestSymlinkHdfs {
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetup() throws Exception {
     wrapper = new FileSystemTestWrapper(dfs, "/tmp/TestSymlinkHdfsFileSystem");
   }
 
   @Override
-  @Ignore("FileSystem adds missing authority in absolute URIs")
+  @Disabled("FileSystem adds missing authority in absolute URIs")
   @Test(timeout=10000)
   public void testCreateWithPartQualPathFails() throws IOException {}
 
-  @Ignore("FileSystem#create creates parent directories," +
+  @Disabled("FileSystem#create creates parent directories," +
       " so dangling links to directories are created")
   @Override
   @Test(timeout=10000)
@@ -56,7 +55,7 @@
     wrapper.createSymlink(file, link, false);
     // Attempt recoverLease through a symlink
     boolean closed = dfs.recoverLease(link);
-    assertTrue("Expected recoverLease to return true", closed);
+    assertTrue(closed, "Expected recoverLease to return true");
   }
 
   @Test(timeout=10000)
@@ -69,7 +68,7 @@
     wrapper.createSymlink(file, link, false);
     // Attempt recoverLease through a symlink
     boolean closed = dfs.isFileClosed(link);
-    assertTrue("Expected isFileClosed to return true", closed);
+    assertTrue(closed, "Expected isFileClosed to return true");
   }
 
   @Test(timeout=10000)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
index feb77f8..18319a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
@@ -26,9 +26,9 @@
 import org.apache.hadoop.hdfs.PeerCache;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 
@@ -72,19 +72,19 @@
       // Read a byte.  This will trigger the creation of a block reader.
       stream.seek(2);
       int b = stream.read();
-      Assert.assertTrue(-1 != b);
+      Assertions.assertTrue(-1 != b);
 
       // The Peer cache should start off empty.
       PeerCache cache = dfs.getClient().getClientContext().getPeerCache();
-      Assert.assertEquals(0, cache.size());
+      Assertions.assertEquals(0, cache.size());
 
       // Unbuffer should clear the block reader and return the socket to the
       // cache.
       stream.unbuffer();
       stream.seek(2);
-      Assert.assertEquals(1, cache.size());
+      Assertions.assertEquals(1, cache.size());
       int b2 = stream.read();
-      Assert.assertEquals(b, b2);
+      Assertions.assertEquals(b, b2);
     } finally {
       if (stream != null) {
         IOUtils.cleanupWithLogger(null, stream);
@@ -117,7 +117,7 @@
       for (int i = 0; i < NUM_OPENS; i++) {
         streams[i] = dfs.open(TEST_PATH);
         LOG.info("opening file " + i + "...");
-        Assert.assertTrue(-1 != streams[i].read());
+        Assertions.assertTrue(-1 != streams[i].read());
         streams[i].unbuffer();
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
index 5a04f67..231ffb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,8 +32,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.PathUtils;
 
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test of the URL stream handler.
@@ -48,7 +46,7 @@
   private static final FsUrlStreamHandlerFactory HANDLER_FACTORY
       = new FsUrlStreamHandlerFactory();
 
-  @BeforeClass
+  @BeforeAll
   public static void setupHandler() {
 
     // Setup our own factory
@@ -166,20 +164,20 @@
 
   @Test
   public void testHttpDefaultHandler() throws Throwable {
-    assertNull("Handler for HTTP is the Hadoop one",
-        HANDLER_FACTORY.createURLStreamHandler("http"));
+    assertNull(HANDLER_FACTORY.createURLStreamHandler("http"),
+        "Handler for HTTP is the Hadoop one");
   }
 
   @Test
   public void testHttpsDefaultHandler() throws Throwable {
-    assertNull("Handler for HTTPS is the Hadoop one",
-        HANDLER_FACTORY.createURLStreamHandler("https"));
+    assertNull(HANDLER_FACTORY.createURLStreamHandler("https"),
+        "Handler for HTTPS is the Hadoop one");
   }
 
   @Test
   public void testUnknownProtocol() throws Throwable {
-    assertNull("Unknown protocols are not handled",
-        HANDLER_FACTORY.createURLStreamHandler("gopher"));
+    assertNull(HANDLER_FACTORY.createURLStreamHandler("gopher"),
+        "Unknown protocols are not handled");
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
index 7544835..9314798 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
@@ -24,10 +24,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import javax.security.auth.login.LoginException;
 import java.io.IOException;
@@ -38,8 +38,8 @@
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
 import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test of FileContext apis on Webhdfs.
@@ -71,7 +71,7 @@
     return webhdfsUrl;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning()
       throws IOException, LoginException, URISyntaxException {
 
@@ -85,7 +85,7 @@
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     URI webhdfsUrlReal = getWebhdfsUrl();
     Path testBuildData = new Path(
@@ -153,7 +153,7 @@
     assertArrayEquals(data, bb);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
index 74b9a35..e4e481f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 
@@ -73,14 +73,14 @@
   @Override
   public void init() throws IOException {
     super.init();
-    Assert.assertTrue("contract options not loaded",
-                      isSupported(ContractOptions.IS_CASE_SENSITIVE, false));
+    Assertions.assertTrue(isSupported(ContractOptions.IS_CASE_SENSITIVE, false),
+        "contract options not loaded");
   }
 
   @Override
   public FileSystem getTestFileSystem() throws IOException {
     //assumes cluster is not null
-    Assert.assertNotNull("cluster not created", cluster);
+    Assertions.assertNotNull(cluster, "cluster not created");
     return cluster.getFileSystem();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
index 897354c..ed3cead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
@@ -17,19 +17,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractAppend extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
index 05587ce..ddb5cc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,14 +31,14 @@
  */
 public class TestHDFSContractConcat extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     HDFSContract.getCluster().getFileSystem().getDefaultBlockSize();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
index b209bf1..8c9cbaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
@@ -21,19 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
index 4dc4af0..47fbf30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestHDFSContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
index d81d3c2..81b0440 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
@@ -21,20 +21,20 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractGetFileStatus extends
     AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
index 053429d..fae15f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
index 0efb33f..6f532da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
@@ -19,14 +19,14 @@
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.jupiter.api.AfterAll;
 
 /**
  * Test MultipartUploader tests on HDFS.
@@ -37,12 +37,12 @@
   protected static final Logger LOG =
       LoggerFactory.getLogger(TestHDFSContractMultipartUploader.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
index 0d9e810..3a11f1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
index c65a60b..46a7872 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -35,15 +35,15 @@
 
   public TestHDFSContractPathHandle(String testname, Options.HandleOpt[] opts,
       boolean serialized) {
     super(testname, opts, serialized);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
index 706b0cf..f96dfae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
@@ -21,19 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
index fc1851d..dc9fdd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestHDFSContractRootDirectory extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
index 259ffce..b12d6aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
index 4899189..8ae002a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
@@ -21,19 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractSetTimes extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
index 54b8bf1..7ee5996 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
@@ -21,20 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractUnbuffer extends AbstractContractUnbufferTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
index 6c7bac3..dcca1b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.fs.loadGenerator;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -33,7 +33,8 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
 /**
  * This class tests if a balancer schedules tasks correctly.
  */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
index 96d15e5..7c7d17b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
@@ -22,10 +22,7 @@
 import static org.apache.hadoop.fs.permission.AclEntryType.USER;
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -45,10 +42,10 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,7 +63,7 @@
   private static FileSystem hdfsAsUser1;
   private static FileSystem hdfsAsUser2;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -85,7 +82,7 @@
     assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     if (hdfs != null) {
       for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
@@ -94,7 +91,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     IOUtils.cleanupWithLogger(null, hdfs, hdfsAsUser1, hdfsAsUser2);
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
index 23de658..2e85b33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.fs.shell;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,9 +33,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 /**
@@ -48,7 +48,7 @@
   private static MiniDFSCluster cluster;
   private static FileSystem fs;
   
-  @Before
+  @BeforeEach
     public void setUp() throws IOException{
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -56,7 +56,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
     public void tearDown() throws IOException{
     if(fs != null){
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
index bbdbd5a..c71b7d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
@@ -24,9 +24,9 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Tests that the NN startup is successful with ViewFSOverloadScheme.
@@ -37,7 +37,7 @@
   private static final String HDFS_SCHEME = "hdfs";
   private static final Configuration CONF = new Configuration();
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() {
     CONF.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
@@ -78,7 +78,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
index 5e2f42b..f0221bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Tests ViewFileSystemOverloadScheme with configured mount links.
@@ -35,7 +35,7 @@
   private Path oldVersionMountTablePath;
   private Path newVersionMountTablePath;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws IOException {
     super.setUp();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
index 9b2953c..1e7cfbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
@@ -29,9 +29,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Make sure that ViewFileSystem works when the root of an FS is mounted to a
@@ -48,7 +48,7 @@
     return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -63,7 +63,7 @@
     fHdfs = cluster.getFileSystem();
   }
       
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -71,7 +71,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     super.setUp();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..f59060e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -41,7 +41,6 @@
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -56,12 +55,9 @@
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.junit.Assert.*;
+import org.junit.jupiter.api.*;
+
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,7 +81,7 @@
     return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
 
@@ -129,7 +125,7 @@
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -137,7 +133,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // create the test root on local_fs
     fsTarget = fHdfs;
@@ -147,7 +143,7 @@
   }
 
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -204,13 +200,13 @@
 
     //Verify file deletion within EZ
     DFSTestUtil.verifyDelete(shell, fsTarget, encFile, true);
-    assertTrue("ViewFileSystem trash roots should include EZ file trash",
-        (fsView.getTrashRoots(true).size() == 1));
+    assertEquals(1, fsView.getTrashRoots(true).size(),
+        "ViewFileSystem trash roots should include EZ file trash");
 
     //Verify deletion of EZ
     DFSTestUtil.verifyDelete(shell, fsTarget, zone, true);
-    assertTrue("ViewFileSystem trash roots should include EZ zone trash",
-        (fsView.getTrashRoots(true).size() == 2));
+    assertEquals(2, fsView.getTrashRoots(true).size(),
+        "ViewFileSystem trash roots should include EZ zone trash");
   }
 
   @Test
@@ -253,15 +249,15 @@
         viewFs.getFileChecksum(mountDataFilePath);
     FileChecksum fileChecksumViaTargetFs =
         fsTarget.getFileChecksum(fsTargetFilePath);
-    assertTrue("File checksum not matching!",
-        fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
+    assertTrue(fileChecksumViaViewFs.equals(fileChecksumViaTargetFs),
+        "File checksum not matching!");
 
     fileChecksumViaViewFs =
         viewFs.getFileChecksum(mountDataFilePath, fileLength / 2);
     fileChecksumViaTargetFs =
         fsTarget.getFileChecksum(fsTargetFilePath, fileLength / 2);
-    assertTrue("File checksum not matching!",
-        fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
+    assertTrue(fileChecksumViaViewFs.equals(fileChecksumViaTargetFs),
+        "File checksum not matching!");
   }
 
   //Rename should fail on across different fileSystems
@@ -276,7 +272,7 @@
     fsView.create(filePath);
     try {
       fsView.rename(filePath, hdfFilepath);
-      ContractTestUtils.fail("Should thrown IOE on Renames across filesytems");
+      Assertions.fail("Should thrown IOE on Renames across filesytems");
     } catch (IOException e) {
       GenericTestUtils
           .assertExceptionContains("Renames across Mount points not supported",
@@ -325,7 +321,7 @@
     // 1. test mkdirs
     final Path testDir = new Path("testdir1/sub1/sub3");
     final Path testDir_tmp = new Path("testdir1/sub1/sub3_temp");
-    assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir));
+    assertTrue(nfly.mkdirs(testDir), testDir + ": Failed to create!");
 
     // Test renames
     assertTrue(nfly.rename(testDir, testDir_tmp));
@@ -333,7 +329,7 @@
 
     for (final URI testUri : testUris) {
       final FileSystem fs = FileSystem.get(testUri, testConf);
-      assertTrue(testDir + " should exist!", fs.exists(testDir));
+      assertTrue(fs.exists(testDir), testDir + " should exist!");
     }
 
     // 2. test write
@@ -349,7 +345,7 @@
       final FileSystem fs = FileSystem.get(testUri, testConf);
       final FSDataInputStream fsdis = fs.open(testFile);
       try {
-        assertEquals("Wrong file content", testString, fsdis.readUTF());
+        assertEquals(testString, fsdis.readUTF(), "Wrong file content");
       } finally {
         fsdis.close();
       }
@@ -364,7 +360,7 @@
       FSDataInputStream fsDis = null;
       try {
         fsDis = nfly.open(testFile);
-        assertEquals("Wrong file content", testString, fsDis.readUTF());
+        assertEquals(testString, fsDis.readUTF(), "Wrong file content");
       } finally {
         IOUtils.cleanupWithLogger(LOG, fsDis);
         cluster.restartNameNode(i);
@@ -378,7 +374,7 @@
     FSDataInputStream fsDis = null;
     try {
       fsDis = nfly.open(testFile);
-      assertEquals("Wrong file content", testString, fsDis.readUTF());
+      assertEquals(testString, fsDis.readUTF(), "Wrong file content");
       assertTrue(fs1.exists(testFile));
     } finally {
       IOUtils.cleanupWithLogger(LOG, fsDis);
@@ -393,18 +389,18 @@
       for (final URI testUri : testUris) {
         final FileSystem fs = FileSystem.get(testUri, conf);
         fs.setTimes(testFile, 1L, 1L);
-        assertEquals(testUri + "Set mtime failed!", 1L,
-            fs.getFileStatus(testFile).getModificationTime());
-        assertEquals("nfly file status wrong", expectedMtime,
-            nfly.getFileStatus(testFile).getModificationTime());
+        assertEquals(1L, fs.getFileStatus(testFile).getModificationTime(),
+            testUri + ": Set mtime failed!");
+        assertEquals(expectedMtime,
+            nfly.getFileStatus(testFile).getModificationTime(), "nfly file status wrong");
         FSDataInputStream fsDis2 = null;
         try {
           fsDis2 = nfly.open(testFile);
-          assertEquals("Wrong file content", testString, fsDis2.readUTF());
-          // repair is done, now trying via normal fs
-          //
-          assertEquals("Repair most recent failed!", expectedMtime,
-              fs.getFileStatus(testFile).getModificationTime());
+          assertEquals(testString, fsDis2.readUTF(), "Wrong file content");
+          // repair is done, now trying via normal fs
+          //
+          assertEquals(expectedMtime,
+              fs.getFileStatus(testFile).getModificationTime(), "Repair most recent failed!");
         } finally {
           IOUtils.cleanupWithLogger(LOG, fsDis2);
         }
@@ -476,7 +472,7 @@
     FileSystem otherfs = map.get("user1");
     otherfs.mkdirs(user1Path);
     String owner = otherfs.getFileStatus(user1Path).getOwner();
-    assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+    assertEquals(owner, userUgi.getShortUserName(), "The owner did not match ");
     otherfs.delete(user1Path, false);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
index e731760..d97b4d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
@@ -18,11 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -45,11 +41,11 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,7 +75,7 @@
     return new FileSystemTestHelper(TEST_BASE_PATH);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -100,7 +96,7 @@
         Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null);
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -108,7 +104,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     super.setUp();
@@ -179,10 +175,10 @@
     FileStatus baseFileRelStat = vfs.getFileStatus(new Path(viewFsUri.toString()
         + testBaseFileRelative.toUri().toString()));
     LOG.info("BaseFileRelStat: " + baseFileRelStat);
-    Assert.assertEquals("Unexpected file length for " + testBaseFile,
-        1, baseFileStat.getLen());
-    Assert.assertEquals("Unexpected file length for " + testBaseFileRelative,
-        baseFileStat.getLen(), baseFileRelStat.getLen());
+    Assertions.assertEquals(1, baseFileStat.getLen(),
+        "Unexpected file length for " + testBaseFile);
+    Assertions.assertEquals(baseFileStat.getLen(), baseFileRelStat.getLen(),
+        "Unexpected file length for " + testBaseFileRelative);
     FileStatus level2FileStat = vfs.getFileStatus(new Path(viewFsUri.toString()
         + testLevel2File.toUri().toString()));
     LOG.info("Level2FileStat: " + level2FileStat);
@@ -228,8 +224,8 @@
     FileStatus baseFileStat = vfs.getFileStatus(
         new Path(viewFsUri.toString() + testBaseFile.toUri().toString()));
     LOG.info("BaseFileStat: " + baseFileStat);
-    Assert.assertEquals("Unexpected file length for " + testBaseFile,
-        0, baseFileStat.getLen());
+    Assertions.assertEquals(0, baseFileStat.getLen(),
+        "Unexpected file length for " + testBaseFile);
     FileStatus level2FileStat = vfs.getFileStatus(new Path(viewFsUri.toString()
         + testLevel2File.toUri().toString()));
     LOG.info("Level2FileStat: " + level2FileStat);
@@ -240,8 +236,8 @@
 
     FileStatus level2FileStatAfterWrite = vfs.getFileStatus(
         new Path(viewFsUri.toString() + testLevel2File.toUri().toString()));
-    Assert.assertTrue("Unexpected file length for " + testLevel2File,
-        level2FileStatAfterWrite.getLen() > level2FileStat.getLen());
+    Assertions.assertTrue(level2FileStatAfterWrite.getLen() > level2FileStat.getLen(),
+        "Unexpected file length for " + testLevel2File);
 
     vfs.close();
   }
@@ -265,8 +261,8 @@
       FileSystem.get(viewFsUri, conf);
       fail("Shouldn't allow linkMergeSlash to take extra mount points!");
     } catch (IOException e) {
-      assertTrue("Unexpected error: " + e.getMessage(),
-          e.getMessage().contains(expectedErrorMsg));
+      assertTrue(e.getMessage().contains(expectedErrorMsg),
+          "Unexpected error: " + e.getMessage());
     }
   }
 
@@ -299,13 +295,13 @@
         afterFallback.add(stat.getPath());
       }
       afterFallback.removeAll(beforeFallback);
-      assertTrue("Listing didn't include fallback link",
-          afterFallback.size() == 1);
+      assertTrue(afterFallback.size() == 1,
+          "Listing didn't include fallback link");
       Path[] fallbackArray = new Path[afterFallback.size()];
       afterFallback.toArray(fallbackArray);
       Path expected = new Path(viewFsUri.toString(), "dir1");
-      assertEquals("Path did not match",
-          expected, fallbackArray[0]);
+      assertEquals(expected, fallbackArray[0],
+          "Path did not match");
 
       // Create a directory using the returned fallback path and verify
       Path childDir = new Path(fallbackArray[0], "child");
@@ -349,13 +345,13 @@
         afterFallback.add(stat.getPath());
       }
       afterFallback.removeAll(beforeFallback);
-      assertEquals("The same directory name in fallback link should be shaded",
-          1, afterFallback.size());
+      assertEquals(1, afterFallback.size(),
+          "The same directory name in fallback link should be shaded");
       Path[] fallbackArray = new Path[afterFallback.size()];
       // Only user1 should be listed as fallback link
       Path expected = new Path(viewFsDefaultClusterUri.toString(), "user1");
-      assertEquals("Path did not match",
-          expected, afterFallback.toArray(fallbackArray)[0]);
+      assertEquals(expected, afterFallback.toArray(fallbackArray)[0],
+          "Path did not match");
 
       // Create a directory using the returned fallback path and verify
       Path childDir = new Path(fallbackArray[0], "child");
@@ -430,8 +426,8 @@
       }
       //viewfs://default/user1/hive/warehouse
       afterFallback.removeAll(beforeFallback);
-      assertEquals("The same directory name in fallback link should be shaded",
-          1, afterFallback.size());
+      assertEquals(1, afterFallback.size(),
+          "The same directory name in fallback link should be shaded");
     }
   }
 
@@ -502,8 +498,8 @@
         }
       }
       afterFallback.removeAll(beforeFallback);
-      assertEquals("Just to make sure paths are same.", 0,
-          afterFallback.size());
+      assertEquals(0, afterFallback.size(),
+          "Just to make sure paths are same.");
     }
   }
 
@@ -559,14 +555,14 @@
           assertEquals(FsPermission.valueOf("-rwxr--rw-"),
               stat.getPermission());
         } else {
-          assertEquals("Path is: " + stat.getPath(),
-              FsPermission.valueOf("-rwxr--r--"), stat.getPermission());
+          assertEquals(FsPermission.valueOf("-rwxr--r--"),
+              stat.getPermission(), "Path is: " + stat.getPath());
         }
       }
       afterFallback.removeAll(beforeFallback);
       assertEquals(1, afterFallback.size());
-      assertEquals("/user2 dir from fallback should be listed.", "user2",
-          afterFallback.iterator().next().getName());
+        assertEquals("user2",
+                afterFallback.iterator().next().getName(), "/user2 dir from fallback should be listed.");
     }
   }
 
@@ -908,7 +904,7 @@
       assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir)));
       try {
         vfs.create(vfsTestDir);
-        Assert.fail("Should fail to create file as this is an internal dir.");
+        Assertions.fail("Should fail to create file as this is an internal dir.");
       } catch (NotInMountpointException e){
         // This tree is part of internal tree. The above exception will be
         // thrown from getDefaultReplication, getDefaultBlockSize APIs which was
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
index 606743f..0c668dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
@@ -34,17 +34,11 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import javax.security.auth.login.LoginException;
 
@@ -74,7 +68,7 @@
     return new FileSystemTestHelper(TEST_TEMP_PATH);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -93,7 +87,7 @@
     fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -101,7 +95,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     super.setUp();
@@ -191,9 +185,9 @@
       fail("Shouldn't allow both merge slash link and regular link on same "
           + "mount table.");
     } catch (IOException e) {
-      assertTrue("Unexpected error message: " + e.getMessage(),
-          e.getMessage().contains(expectedErrorMsg1) || e.getMessage()
-              .contains(expectedErrorMsg2));
+      assertTrue(e.getMessage().contains(expectedErrorMsg1)
+          || e.getMessage().contains(expectedErrorMsg2),
+          "Unexpected error message: " + e.getMessage());
     }
   }
 
@@ -226,9 +220,9 @@
         LINK_MERGE_SLASH_CLUSTER_1_NAME, "/", null, null);
     FileSystem fs = FileSystem.get(viewFsUri, conf);
     FileSystem[] childFs = fs.getChildFileSystems();
-    Assert.assertEquals("Unexpected number of child filesystems!",
-        1, childFs.length);
-    Assert.assertEquals("Unexpected child filesystem!",
-        DistributedFileSystem.class, childFs[0].getClass());
+    Assertions.assertEquals(1, childFs.length,
+        "Unexpected number of child filesystems!");
+    Assertions.assertEquals(DistributedFileSystem.class, childFs[0].getClass(),
+        "Unexpected child filesystem!");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
index d3afa47..4e2f34f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
@@ -36,16 +36,12 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.viewfs.RegexMountPoint.INTERCEPTOR_INTERNAL_SEP;
-import static org.junit.Assert.assertSame;
+import static org.junit.jupiter.api.Assertions.assertSame;
 
 /**
  * Test linkRegex node type for view file system.
@@ -73,7 +69,7 @@
     return new FileSystemTestHelper(TEST_BASE_PATH);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     SupportsBlocks = true;
     clusterConfig = ViewFileSystemTestSetup.createConfig();
@@ -91,7 +87,7 @@
     fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -99,7 +95,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     super.setUp();
@@ -157,7 +153,7 @@
   private void createDirWithChildren(
       FileSystem fileSystem, Path dir, List<Path> childrenFiles)
       throws IOException {
-    Assert.assertTrue(fileSystem.mkdirs(dir));
+    Assertions.assertTrue(fileSystem.mkdirs(dir));
     int index = 0;
     for (Path childFile : childrenFiles) {
       createFile(fileSystem, childFile, index, true);
@@ -224,11 +220,11 @@
     URI viewFsUri = new URI(
         FsConstants.VIEWFS_SCHEME, CLUSTER_NAME, "/", null, null);
     try (FileSystem vfs = FileSystem.get(viewFsUri, config)) {
-      Assert.assertEquals(expectedResolveResult.toString(),
+      Assertions.assertEquals(expectedResolveResult.toString(),
           vfs.resolvePath(dirPathBeforeMountPoint).toString());
-      Assert.assertTrue(
+      Assertions.assertTrue(
           vfs.getFileStatus(dirPathBeforeMountPoint).isDirectory());
-      Assert.assertEquals(
+      Assertions.assertEquals(
           childrenFilesCnt, vfs.listStatus(dirPathBeforeMountPoint).length);
 
       // Test Inner cache, the resolved result's filesystem should be the same.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
index dcfa051..714a08c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
@@ -19,8 +19,6 @@
 
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
-import static org.junit.Assume.assumeTrue;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
@@ -39,11 +37,12 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
 
 /**
  * Tests ViewFileSystemOverloadScheme with file system contract tests.
@@ -55,7 +54,7 @@
   private static String defaultWorkingDirectory;
   private static Configuration conf = new HdfsConfiguration();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException {
     final File basedir = GenericTestUtils.getRandomizedTestDir();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -67,7 +66,7 @@
         "/user/" + UserGroupInformation.getCurrentUser().getShortUserName();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.set(String.format("fs.%s.impl", "hdfs"),
         ViewFileSystemOverloadScheme.class.getName());
@@ -89,7 +88,7 @@
     fs = FileSystem.get(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownAfter() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -117,7 +116,7 @@
   @Override
   @Test
   public void testListStatusRootDir() throws Throwable {
-    assumeTrue(rootDirTestEnabled());
+    Assumptions.assumeTrue(rootDirTestEnabled());
     Path dir = path("/");
     Path child = path("/FileSystemContractBaseTest");
     try (FileSystem dfs = ((ViewFileSystemOverloadScheme) fs).getRawFileSystem(
@@ -129,7 +128,7 @@
   }
 
   @Override
-  @Ignore // This test same as above in this case.
+  @Disabled // This test same as above in this case.
   public void testLSRootDir() throws Throwable {
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
index 650a472..e252bcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
@@ -39,16 +39,16 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 /**
@@ -67,7 +67,7 @@
   private static final String HDFS_USER_FOLDER = "/HDFSUser";
   private static final String LOCAL_FOLDER = "/local";
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException {
     cluster =
         new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build();
@@ -77,7 +77,7 @@
   /**
    * Sets up the configurations and starts the MiniDFSCluster.
    */
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     Configuration config = getNewConf();
     config.setInt(
@@ -91,10 +91,10 @@
         URI.create(config.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
     localTargetDir = new File(TEST_ROOT_DIR, "/root/");
     localTargetDir.mkdirs();
-    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+    Assertions.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     if (cluster != null) {
       FileSystem fs = new DistributedFileSystem();
@@ -102,7 +102,7 @@
       try {
         FileStatus[] statuses = fs.listStatus(new Path("/"));
         for (FileStatus st : statuses) {
-          Assert.assertTrue(fs.delete(st.getPath(), true));
+          Assertions.assertTrue(fs.delete(st.getPath(), true));
         }
       } finally {
         fs.close();
@@ -111,7 +111,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if (cluster != null) {
       FileSystem.closeAll();
@@ -154,7 +154,7 @@
 
     try (FileSystem fs
         =  FileSystem.get(conf)) {
-      Assert.assertEquals(2, fs.getChildFileSystems().length);
+      Assertions.assertEquals(2, fs.getChildFileSystems().length);
       fs.createNewFile(hdfsFile); // /HDFSUser/testfile
       fs.mkdirs(localDir); // /local/test
     }
@@ -162,20 +162,20 @@
     // Initialize HDFS and test files exist in ls or not
     try (DistributedFileSystem dfs = new DistributedFileSystem()) {
       dfs.initialize(defaultFSURI, conf);
-      Assert.assertTrue(dfs.exists(
+      Assertions.assertTrue(dfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath),
               hdfsFile.getName()))); // should be in hdfs.
-      Assert.assertFalse(dfs.exists(
+      Assertions.assertFalse(dfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath),
               localDir.getName()))); // should not be in local fs.
     }
 
     try (RawLocalFileSystem lfs = new RawLocalFileSystem()) {
       lfs.initialize(localTragetPath.toUri(), conf);
-      Assert.assertFalse(lfs.exists(
+      Assertions.assertFalse(lfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath),
               hdfsFile.getName()))); // should not be in hdfs.
-      Assert.assertTrue(lfs.exists(
+      Assertions.assertTrue(lfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath),
               localDir.getName()))); // should be in local fs.
     }
@@ -210,7 +210,7 @@
       });
     } else {
       try (FileSystem fs = FileSystem.get(conf)) {
-        Assert.assertEquals("hdfs", fs.getScheme());
+        Assertions.assertEquals("hdfs", fs.getScheme());
       }
     }
   }
@@ -241,14 +241,14 @@
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.mkdirs(hdfsTargetPath);
       FileStatus[] ls = fs.listStatus(new Path("/"));
-      Assert.assertEquals(2, ls.length);
+      Assertions.assertEquals(2, ls.length);
       String lsPath1 =
           Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).toString();
       String lsPath2 =
           Path.getPathWithoutSchemeAndAuthority(ls[1].getPath()).toString();
-      Assert.assertTrue(
+      Assertions.assertTrue(
           HDFS_USER_FOLDER.equals(lsPath1) || LOCAL_FOLDER.equals(lsPath1));
-      Assert.assertTrue(
+      Assertions.assertTrue(
           HDFS_USER_FOLDER.equals(lsPath2) || LOCAL_FOLDER.equals(lsPath2));
     }
   }
@@ -270,7 +270,7 @@
 
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.listStatus(new Path("/nonMount"));
-      Assert.fail("It should fail as no mount link with /nonMount");
+      Assertions.fail("It should fail as no mount link with /nonMount");
     }
   }
 
@@ -349,8 +349,8 @@
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.createNewFile(new Path("/nonMount/myfile"));
       FileStatus[] ls = fs.listStatus(new Path("/nonMount"));
-      Assert.assertEquals(1, ls.length);
-      Assert.assertEquals(
+      Assertions.assertEquals(1, ls.length);
+      Assertions.assertEquals(
           Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).getName(),
           "myfile");
     }
@@ -376,7 +376,7 @@
             localTargetDir.toURI().toString()}, conf);
     try (FileSystem fs = FileSystem.get(conf)) {
       if (fallbackExist) {
-        Assert.assertTrue(fs.createNewFile(new Path("/newFileOnRoot")));
+        Assertions.assertTrue(fs.createNewFile(new Path("/newFileOnRoot")));
       } else {
         LambdaTestUtils.intercept(NotInMountpointException.class, () -> {
           fs.createNewFile(new Path("/newFileOnRoot"));
@@ -422,7 +422,7 @@
 
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.createNewFile(new Path("/onRootWhenFallBack"));
-      Assert.fail("OverloadScheme target fs should be valid.");
+      Assertions.fail("OverloadScheme target fs should be valid.");
     }
   }
 
@@ -446,7 +446,7 @@
     try (FileSystem fs = FileSystem.get(conf)) {
       Path testFile = new Path(HDFS_USER_FOLDER + "/testFile");
       fs.createNewFile(testFile);
-      Assert.assertTrue(fs.exists(testFile));
+      Assertions.assertTrue(fs.exists(testFile));
     }
   }
 
@@ -470,13 +470,13 @@
 
     // 1. Only 1 hdfs child file system should be there with cache.
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(1, vfs.getChildFileSystems().length);
+      Assertions.assertEquals(1, vfs.getChildFileSystems().length);
     }
 
     // 2. Two hdfs file systems should be there if no cache.
     conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(isFallBackExist(conf) ? 3 : 2,
+      Assertions.assertEquals(isFallBackExist(conf) ? 3 : 2,
           vfs.getChildFileSystems().length);
     }
   }
@@ -509,7 +509,7 @@
     conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
     // Two hdfs file systems should be there if no cache.
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(isFallBackExist(conf) ? 3 : 2,
+      Assertions.assertEquals(isFallBackExist(conf) ? 3 : 2,
           vfs.getChildFileSystems().length);
     }
   }
@@ -537,7 +537,7 @@
     // cache should work.
     conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(isFallBackExist(conf) ? 2 : 1,
+      Assertions.assertEquals(isFallBackExist(conf) ? 2 : 1,
           vfs.getChildFileSystems().length);
     }
   }
@@ -561,7 +561,7 @@
 
     final Path testDir = new Path("/nflyroot/testdir1/sub1/sub3");
     final Path testDirTmp = new Path("/nflyroot/testdir1/sub1/sub3_temp");
-    assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir));
+    assertTrue(nfly.mkdirs(testDir), testDir + ": Failed to create!");
 
     // Test renames
     assertTrue(nfly.rename(testDir, testDirTmp));
@@ -570,7 +570,7 @@
     final URI[] testUris = new URI[] {uri1, uri2 };
     for (final URI testUri : testUris) {
       final FileSystem fs = FileSystem.get(testUri, conf);
-      assertTrue(testDir + " should exist!", fs.exists(testDir));
+      assertTrue(fs.exists(testDir), testDir + " should exist!");
     }
   }
 
@@ -688,7 +688,7 @@
   private void readString(final FileSystem nfly, final Path testFile,
       final String testString, final URI testUri) throws IOException {
     try (FSDataInputStream fsDis = nfly.open(testFile)) {
-      assertEquals("Wrong file content", testString, fsDis.readUTF());
+      assertEquals(testString, fsDis.readUTF(), "Wrong file content");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
index 10b6f17..bb3eb30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
@@ -28,11 +28,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -42,8 +38,8 @@
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify ACL through ViewFileSystem functionality.
@@ -61,7 +57,7 @@
   private FileSystemTestHelper fileSystemTestHelper =
       new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(clusterConf)
@@ -74,14 +70,14 @@
     fHdfs2 = cluster.getFileSystem(1);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     fsTarget2 = fHdfs2;
@@ -105,7 +101,7 @@
     ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
     fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
index 2f44b46..43a2288 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
@@ -30,13 +30,13 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify truncate through ViewFileSystem functionality.
@@ -53,7 +53,7 @@
   private FileSystemTestHelper fileSystemTestHelper =
       new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs");
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -63,14 +63,14 @@
     fHdfs = cluster.getFileSystem(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
@@ -89,7 +89,7 @@
         .addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
index b487188..89a000f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
@@ -24,16 +24,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify XAttrs through ViewFileSystem functionality.
@@ -57,7 +53,7 @@
   protected static final String name2 = "user.a2";
   protected static final byte[] value2 = {0x37, 0x38, 0x39};
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -69,14 +65,14 @@
     fHdfs2 = cluster.getFileSystem(1);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     fsTarget2 = fHdfs2;
@@ -102,7 +98,7 @@
         targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
     fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
index 8866465..7b95e9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
@@ -30,9 +30,9 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
@@ -49,7 +49,7 @@
     return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot");
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -62,7 +62,7 @@
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -70,7 +70,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // create the test root on local_fs
     fcTarget = fc;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
index a49735c..60ee801 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
@@ -28,9 +28,7 @@
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.URI;
@@ -49,9 +47,9 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests for viewfs implementation of default fs level values.
@@ -74,7 +72,7 @@
   // Use NotInMountpoint path to trigger the exception
   private static Path notInMountpointPath;
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
 
@@ -218,7 +216,7 @@
     assertTrue(qu.getSpaceConsumed() > 0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() throws IOException {
     fHdfs.delete(new Path(testFileName), true);
     fHdfs.delete(notInMountpointPath, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
index e3b4fe2..e8476c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
@@ -23,8 +23,8 @@
  * Since viewfs has overlayed ViewFsFileStatus, we ran into
  * serialization problems. THis test is test the fix.
  */
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.IOException;
 import java.net.URI;
@@ -43,9 +43,9 @@
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestViewFsFileStatusHdfs {
   
@@ -59,7 +59,7 @@
   private static FileSystem fHdfs;
   private static FileSystem vfs;
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
@@ -108,15 +108,15 @@
     // Get checksum of different file in HDFS
     FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(
       new Path(someFile+"other"));
-    // Checksums of the same file (got through HDFS and ViewFS should be same)
-    assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum,
-      hdfsCheckSum);
-    // Checksum of different files should be different.
-    assertFalse("Some other HDFS file which should not have had the same " +
-      "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
+    // Checksums of the same file (got through HDFS and ViewFS) should be the same.
+    assertEquals(viewFSCheckSum, hdfsCheckSum,
+        "HDFS and ViewFS checksums were not the same");
+    // Checksums of different files should be different.
+    assertFalse(viewFSCheckSum.equals(otherHdfsFileCheckSum), "Some other HDFS "
+        + "file which should not have had the same checksum as viewFS did!");
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() throws IOException {
     fHdfs.delete(new Path(testfilename), true);
     fHdfs.delete(new Path(someFile), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
index 540883d..fdd30dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
@@ -36,13 +36,13 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
@@ -56,7 +56,7 @@
   }
 
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -72,7 +72,7 @@
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -80,7 +80,7 @@
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // create the test root on local_fs
     fcTarget = fc;
@@ -160,7 +160,7 @@
     FileContext otherfs = map.get("user1");
     otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
     String owner = otherfs.getFileStatus(user1Path).getOwner();
-    assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+    assertEquals(owner, userUgi.getShortUserName(), "The owner did not match ");
     otherfs.delete(user1Path, false);
   }
  
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
index 09e02be..e2f8c8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -46,11 +43,11 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test for viewfs with LinkFallback mount table entries.
@@ -62,7 +59,7 @@
   private static URI viewFsDefaultClusterUri;
   private Path targetTestRoot;
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning()
       throws IOException, URISyntaxException {
     int nameSpacesCount = 3;
@@ -88,14 +85,14 @@
 
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     initializeTargetTestRoot();
@@ -295,7 +292,7 @@
       // attempt to create in fallback.
       vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(),
           false);
-      Assert.fail("It should throw IOE when fallback fs not available.");
+      Assertions.fail("It should throw IOE when fallback fs not available.");
     } catch (IOException e) {
       cluster.restartNameNodes();
       // should succeed when fallback fs is back to normal.
@@ -570,7 +567,7 @@
     fs.rename(src, dst, Options.Rename.OVERWRITE);
     LambdaTestUtils
         .intercept(FileNotFoundException.class, () -> fs.getFileStatus(src));
-    Assert.assertNotNull(fs.getFileStatus(dst));
+    Assertions.assertNotNull(fs.getFileStatus(dst));
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
index 1243add..689f98f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
@@ -29,11 +29,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
+
 import java.util.List;
 
 import java.io.IOException;
@@ -44,8 +41,8 @@
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.fs.permission.FsAction.NONE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify ACL through ViewFs functionality.
@@ -61,7 +58,7 @@
   private FileContextTestHelper fileContextTestHelper =
       new FileContextTestHelper("/tmp/TestViewFsWithAcls");
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(clusterConf)
@@ -74,14 +71,14 @@
     fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fcTarget = fc;
     fcTarget2 = fc2;
@@ -105,7 +102,7 @@
     ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
     fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
index 9a4223a..452be3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
@@ -25,16 +25,12 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify XAttrs through ViewFs functionality.
@@ -56,7 +52,7 @@
   protected static final String name2 = "user.a2";
   protected static final byte[] value2 = {0x37, 0x38, 0x39};
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -68,14 +64,14 @@
     fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fcTarget = fc;
     fcTarget2 = fc2;
@@ -99,7 +95,7 @@
     ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
     fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
index 10b1803..5ad280c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,6 +29,7 @@
 import java.util.Random;
 
 import org.apache.hadoop.util.Lists;
+import org.junit.jupiter.api.AfterEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -46,8 +47,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This class provide utilities for testing of the admin operations of nodes.
@@ -81,7 +81,7 @@
     return cluster;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     // Set up the hosts/exclude files.
     hostsFileWriter = new HostsFileWriter();
@@ -108,7 +108,7 @@
 
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     hostsFileWriter.cleanup();
     shutdownCluster();
@@ -381,7 +381,7 @@
   protected static void validateCluster(DFSClient client, int numDNs)
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDNs, info.length);
+    assertEquals(numDNs, info.length, "Number of Datanodes ");
   }
 
   /** Start a MiniDFSCluster.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
index f7d90d2..208714c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -97,9 +95,9 @@
     }
     
     LOG.info("partition=" + Arrays.toString(p));
-    assertTrue("i=0", p[0] > 0 && p[0] < n);
+    assertTrue(p[0] > 0 && p[0] < n, "i=0");
     for(int i = 1; i < p.length; i++) {
-      assertTrue("i=" + i, p[i] > p[i - 1] && p[i] < n);
+      assertTrue(p[i] > p[i - 1] && p[i] < n, "i=" + i);
     }
     return p;
   }
@@ -217,8 +215,8 @@
       boolean checkFileStatus) throws IOException {
     if (checkFileStatus) {
       final FileStatus status = fs.getFileStatus(name);
-      assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
-          len, status.getLen());
+      assertEquals(len, status.getLen(),
+          "len=" + len + " but status.getLen()=" + status.getLen());
     }
 
     FSDataInputStream stm = fs.open(name);
@@ -231,9 +229,9 @@
   private static void checkData(final byte[] actual, int from,
                                 final byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
+      assertEquals(expected[from + idx], actual[idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+              expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index d813375..f0c44bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -25,10 +25,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.BufferedOutputStream;
 import java.io.BufferedReader;
@@ -190,8 +187,8 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.Assert;
-import org.junit.Assume;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
 import org.apache.hadoop.util.ToolRunner;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -1672,15 +1669,15 @@
   }
 
   public static void checkComponentsEquals(byte[][] expected, byte[][] actual) {
-    assertEquals("expected: " + DFSUtil.byteArray2PathString(expected)
-        + ", actual: " + DFSUtil.byteArray2PathString(actual), expected.length,
-        actual.length);
+    assertEquals(expected.length, actual.length,
+        "expected: " + DFSUtil.byteArray2PathString(expected)
+            + ", actual: " + DFSUtil.byteArray2PathString(actual));
     int i = 0;
     for (byte[] e : expected) {
       byte[] actualComponent = actual[i++];
-      assertTrue("expected: " + DFSUtil.bytes2String(e) + ", actual: "
-          + DFSUtil.bytes2String(actualComponent),
-          Arrays.equals(e, actualComponent));
+      assertTrue(Arrays.equals(e, actualComponent),
+          "expected: " + DFSUtil.bytes2String(e) + ", actual: "
+              + DFSUtil.bytes2String(actualComponent));
     }
   }
 
@@ -1699,7 +1696,7 @@
       this.sockDir = new TemporarySocketDirectory();
       DomainSocket.disableBindPathValidation();
       formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
-      Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+      Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     }
     
     public Configuration newConfiguration() {
@@ -1737,7 +1734,7 @@
     try (FSDataInputStream in1 = fs.open(p1);
          FSDataInputStream in2 = fs.open(p2)) {
       for (int i = 0; i < len; i++) {
-        assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+        assertEquals(in1.read(), in2.read(), "Mismatch at byte " + i);
       }
     }
   }
@@ -1813,32 +1810,32 @@
         client.getReplicatedBlockStats();
     ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats();
 
-    assertEquals("Under replicated stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
-        aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
-    assertEquals("Low redundancy stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
-        replicatedBlockStats.getLowRedundancyBlocks() +
-            ecBlockGroupStats.getLowRedundancyBlockGroups());
-    assertEquals("Corrupt blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
-        replicatedBlockStats.getCorruptBlocks() +
-            ecBlockGroupStats.getCorruptBlockGroups());
-    assertEquals("Missing blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
-        replicatedBlockStats.getMissingReplicaBlocks() +
-            ecBlockGroupStats.getMissingBlockGroups());
-    assertEquals("Missing blocks with replication factor one not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
-        replicatedBlockStats.getMissingReplicationOneBlocks());
-    assertEquals("Bytes in future blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
-        replicatedBlockStats.getBytesInFutureBlocks() +
-            ecBlockGroupStats.getBytesInFutureBlockGroups());
-    assertEquals("Pending deletion blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
-        replicatedBlockStats.getPendingDeletionBlocks() +
-            ecBlockGroupStats.getPendingDeletionBlocks());
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+        aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX],
+        "Under replicated stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+        replicatedBlockStats.getLowRedundancyBlocks() +
+            ecBlockGroupStats.getLowRedundancyBlockGroups(),
+        "Low redundancy stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
+        replicatedBlockStats.getCorruptBlocks() +
+            ecBlockGroupStats.getCorruptBlockGroups(),
+        "Corrupt blocks stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
+        replicatedBlockStats.getMissingReplicaBlocks() +
+            ecBlockGroupStats.getMissingBlockGroups(),
+        "Missing blocks stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
+        replicatedBlockStats.getMissingReplicationOneBlocks(),
+        "Missing blocks with replication factor one not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
+        replicatedBlockStats.getBytesInFutureBlocks() +
+            ecBlockGroupStats.getBytesInFutureBlockGroups(),
+        "Bytes in future blocks stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
+        replicatedBlockStats.getPendingDeletionBlocks() +
+            ecBlockGroupStats.getPendingDeletionBlocks(),
+        "Pending deletion blocks stats not matching!");
   }
 
   /**
@@ -1884,8 +1881,8 @@
       ExtendedBlock blk) {
     BlockManager bm0 = nn.getNamesystem().getBlockManager();
     BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
-    assertTrue("Block " + blk + " should be under construction, " +
-        "got: " + storedBlock, !storedBlock.isComplete());
+    assertTrue(!storedBlock.isComplete(),
+        "Block " + blk + " should be under construction, got: " + storedBlock);
     // We expect that the replica with the most recent heart beat will be
     // the one to be in charge of the synchronization / recovery protocol.
     final DatanodeStorageInfo[] storages = storedBlock
@@ -1933,8 +1930,8 @@
     }
     assertEquals(retcode, ret);
     if (contain != null) {
-      assertTrue("The real output is: " + output + ".\n It should contain: "
-          + contain, output.contains(contain));
+      assertTrue(output.contains(contain),
+          "The real output is: " + output + ".\n It should contain: " + contain);
     }
   }
 
@@ -2338,23 +2335,23 @@
 
   public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
       Path trashPath, boolean shouldExistInTrash) throws Exception {
-    assertTrue(path + " file does not exist", fs.exists(path));
+    assertTrue(fs.exists(path), path + " file does not exist");
 
     // Verify that trashPath has a path component named ".Trash"
     Path checkTrash = trashPath;
     while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
       checkTrash = checkTrash.getParent();
     }
-    assertEquals("No .Trash component found in trash path " + trashPath,
-        ".Trash", checkTrash.getName());
+    assertEquals(".Trash", checkTrash.getName(),
+        "No .Trash component found in trash path " + trashPath);
 
     String[] argv = new String[]{"-rm", "-r", path.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("rm failed", 0, res);
+    assertEquals(0, res, "rm failed");
     if (shouldExistInTrash) {
-      assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+      assertTrue(fs.exists(trashPath), "File not in trash : " + trashPath);
     } else {
-      assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+      assertFalse(fs.exists(trashPath), "File in trash : " + trashPath);
     }
   }
 
@@ -2563,7 +2560,7 @@
       Path filePath, int namenodeCount, boolean createMoverPath)
           throws IOException {
     final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-    Assert.assertEquals(namenodeCount, namenodes.size());
+    Assertions.assertEquals(namenodeCount, namenodes.size());
     NameNodeConnector.checkOtherInstanceRunning(createMoverPath);
     while (true) {
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
index a8f7378..ea733e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
@@ -26,9 +26,9 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /** This is a comprehensive append test that tries
  * all combinations of file length and number of appended bytes
@@ -59,7 +59,7 @@
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void startUp () throws IOException {
     conf = new HdfsConfiguration();
     init(conf);
@@ -67,7 +67,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
index 284fdb7..b43c9fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
@@ -27,7 +27,7 @@
 import java.util.Collection;
 import java.util.List;
 
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  * Test striped file write operation with data node failures with parameterized
@@ -62,9 +62,9 @@
     }
     final int i = base;
     final Integer length = getLength(i);
-    assumeTrue("Skip test " + i + " since length=null.", length != null);
-    assumeTrue("Test " + i + ", length=" + length
-        + ", is not chosen to run.", RANDOM.nextInt(16) != 0);
+    assumeTrue(length != null, "Skip test " + i + " since length=null.");
+    assumeTrue(RANDOM.nextInt(16) != 0, "Test " + i + ", length=" + length
+        + ", is not chosen to run.");
     System.out.println("Run test " + i + ", length=" + length);
     runTest(length);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
index e149744..ad7bbdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -200,11 +200,11 @@
         + ", parityBlkDelNum = " + parityBlkDelNum
         + ", deleteBlockFile? " + deleteBlockFile);
     int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
-    Assert.assertTrue("dataBlkDelNum and parityBlkDelNum should be positive",
-        dataBlkDelNum >= 0 && parityBlkDelNum >= 0);
-    Assert.assertTrue("The sum of dataBlkDelNum and parityBlkDelNum " +
-        "should be between 1 ~ " + NUM_PARITY_UNITS, recoverBlkNum <=
-        NUM_PARITY_UNITS);
+    Assertions.assertTrue(dataBlkDelNum >= 0 && parityBlkDelNum >= 0,
+        "dataBlkDelNum and parityBlkDelNum should be positive");
+    Assertions.assertTrue(recoverBlkNum <= NUM_PARITY_UNITS,
+        "The sum of dataBlkDelNum and parityBlkDelNum " +
+            "should be between 1 ~ " + NUM_PARITY_UNITS);
 
     // write a file with the length of writeLen
     Path srcPath = new Path(src);
@@ -232,10 +232,10 @@
 
     int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, NUM_DATA_UNITS,
         dataBlkDelNum);
-    Assert.assertNotNull(delDataBlkIndices);
+    Assertions.assertNotNull(delDataBlkIndices);
     int[] delParityBlkIndices = StripedFileTestUtil.randomArray(NUM_DATA_UNITS,
         NUM_DATA_UNITS + NUM_PARITY_UNITS, parityBlkDelNum);
-    Assert.assertNotNull(delParityBlkIndices);
+    Assertions.assertNotNull(delParityBlkIndices);
 
     int[] delBlkIndices = new int[recoverBlkNum];
     System.arraycopy(delDataBlkIndices, 0,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 6578ad0..c0150d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,7 +55,7 @@
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class StripedFileTestUtil {
   public static final Logger LOG =
@@ -77,7 +77,7 @@
   static void verifyLength(FileSystem fs, Path srcPath, int fileLength)
       throws IOException {
     FileStatus status = fs.getFileStatus(srcPath);
-    assertEquals("File length should be the same", fileLength, status.getLen());
+    assertEquals(fileLength, status.getLen(), "File length should be the same");
   }
 
   static void verifyPread(DistributedFileSystem fs, Path srcPath,
@@ -109,9 +109,9 @@
           offset += target;
         }
         for (int i = 0; i < fileLength - startOffset; i++) {
-          assertEquals("Byte at " + (startOffset + i) + " is different, "
-              + "the startOffset is " + startOffset, expected[startOffset + i],
-              result[i]);
+          assertEquals(expected[startOffset + i], result[i],
+              "Byte at " + (startOffset + i) + " is different, "
+                  + "the startOffset is " + startOffset);
         }
       }
     }
@@ -127,8 +127,8 @@
         System.arraycopy(buf, 0, result, readLen, ret);
         readLen += ret;
       }
-      assertEquals("The length of file should be the same to write size", fileLength, readLen);
-      Assert.assertArrayEquals(expected, result);
+      assertEquals(fileLength, readLen,
+          "The length of file should be the same to write size");
+      Assertions.assertArrayEquals(expected, result);
     }
   }
 
@@ -144,8 +144,8 @@
         result.put(buf);
         buf.clear();
       }
-      assertEquals("The length of file should be the same to write size", fileLength, readLen);
-      Assert.assertArrayEquals(expected, result.array());
+      assertEquals(fileLength, readLen,
+          "The length of file should be the same to write size");
+      Assertions.assertArrayEquals(expected, result.array());
     }
   }
 
@@ -185,14 +185,14 @@
       if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
         try {
           in.seek(-1);
-          Assert.fail("Should be failed if seek to negative offset");
+          Assertions.fail("Should be failed if seek to negative offset");
         } catch (EOFException e) {
           // expected
         }
 
         try {
           in.seek(fileLength + 1);
-          Assert.fail("Should be failed if seek after EOF");
+          Assertions.fail("Should be failed if seek after EOF");
         } catch (EOFException e) {
           // expected
         }
@@ -206,8 +206,8 @@
     byte[] buf = new byte[writeBytes - pos];
     IOUtils.readFully(fsdis, buf, 0, buf.length);
     for (int i = 0; i < buf.length; i++) {
-      assertEquals("Byte at " + i + " should be the same",
-          StripedFileTestUtil.getByte(pos + i), buf[i]);
+      assertEquals(StripedFileTestUtil.getByte(pos + i), buf[i],
+          "Byte at " + i + " should be the same");
     }
   }
 
@@ -225,7 +225,7 @@
       final DatanodeInfo[] datanodes = streamer.getNodes();
       if (datanodes != null) {
         assertEquals(1, datanodes.length);
-        Assert.assertNotNull(datanodes[0]);
+        Assertions.assertNotNull(datanodes[0]);
         return datanodes[0];
       }
       try {
@@ -377,13 +377,13 @@
     final int parityBlkNum = ecPolicy.getNumParityUnits();
     int index = 0;
     for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
-      Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
+      Assertions.assertTrue(firstBlock instanceof LocatedStripedBlock);
 
       final long gs = firstBlock.getBlock().getGenerationStamp();
       final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
       final String s = "gs=" + gs + ", oldGS=" + oldGS;
       LOG.info(s);
-      Assert.assertTrue(s, gs >= oldGS);
+      Assertions.assertTrue(gs >= oldGS, s);
 
       LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
           (LocatedStripedBlock) firstBlock, cellSize,
@@ -456,7 +456,7 @@
         for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
           final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
               cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
-          Assert.assertTrue(posInFile < length);
+          Assertions.assertTrue(posInFile < length);
           final byte expected = getByte(posInFile);
 
           if (killed) {
@@ -466,7 +466,7 @@
               String s = "expected=" + expected + " but actual=" + actual[posInBlk]
                   + ", posInFile=" + posInFile + ", posInBlk=" + posInBlk
                   + ". group=" + group + ", i=" + i;
-              Assert.fail(s);
+              Assertions.fail(s);
             }
           }
         }
@@ -507,12 +507,12 @@
     try {
       encoder.encode(dataBytes, expectedParityBytes);
     } catch (IOException e) {
-      Assert.fail("Unexpected IOException: " + e.getMessage());
+      Assertions.fail("Unexpected IOException: " + e.getMessage());
     }
     for (int i = 0; i < parityBytes.length; i++) {
       if (checkSet.contains(i + dataBytes.length)){
-        Assert.assertArrayEquals("i=" + i, expectedParityBytes[i],
-            parityBytes[i]);
+        Assertions.assertArrayEquals(expectedParityBytes[i],
+            parityBytes[i], "i=" + i);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
index e7d8b38..9632672 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -30,10 +30,10 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test abandoning blocks, which clients do on pipeline creation failure.
@@ -48,14 +48,14 @@
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     fs = cluster.getFileSystem();
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -100,8 +100,8 @@
     cluster.restartNameNode();
     blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
         Integer.MAX_VALUE);
-    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
-        orginalNumBlocks, blocks.locatedBlockCount() + 1);
+    Assertions.assertEquals(orginalNumBlocks, blocks.locatedBlockCount() + 1,
+        "Blocks " + b + " has not been abandoned.");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
index 105836e..6c5b2c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
@@ -43,12 +43,11 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the ACLs system through the full code path.  It overlaps
@@ -89,7 +88,7 @@
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void captureUser() throws IOException {
     realUgi = UserGroupInformation.getCurrentUser();
     realUser = System.getProperty("user.name");
@@ -174,7 +173,7 @@
 
       kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
 
-      Assert.assertTrue(kmsDir.mkdirs());
+      Assertions.assertTrue(kmsDir.mkdirs());
     }
 
     writeConf(kmsDir, conf);
@@ -411,66 +410,66 @@
     try {
       setup(conf);
 
-      // Create a test key
-      assertTrue("Exception during creation of key " + KEY1 + " by "
-          + keyadminUgi.getUserName(), createKey(keyadminUgi, KEY1, conf));
+      // Create a test key
+      assertTrue(createKey(keyadminUgi, KEY1, conf),
+          "Exception during creation of key " + KEY1 + " by "
+              + keyadminUgi.getUserName());
 
-      // Fail to create a test key
-      assertFalse("Allowed creation of key " + KEY2 + " by "
-          + hdfsUgi.getUserName(), createKey(hdfsUgi, KEY2, conf));
-      assertFalse("Allowed creation of key " + KEY2 + " by "
-          + userUgi.getUserName(), createKey(userUgi, KEY2, conf));
+      // Fail to create a test key
+      assertFalse(createKey(hdfsUgi, KEY2, conf),
+          "Allowed creation of key " + KEY2 + " by "
+              + hdfsUgi.getUserName());
+      assertFalse(createKey(userUgi, KEY2, conf),
+          "Allowed creation of key " + KEY2 + " by "
+              + userUgi.getUserName());
 
       // Create a directory and chown it to the normal user.
       fs.mkdirs(ZONE1);
       fs.setOwner(ZONE1, userUgi.getUserName(),
           userUgi.getPrimaryGroupName());
 
-      // Create an EZ
-      assertTrue("Exception during creation of EZ " + ZONE1 + " by "
-          + hdfsUgi.getUserName() + " using key " + KEY1,
-            createEncryptionZone(hdfsUgi, KEY1, ZONE1));
+      // Create an EZ
+      assertTrue(createEncryptionZone(hdfsUgi, KEY1, ZONE1),
+          "Exception during creation of EZ " + ZONE1 + " by "
+              + hdfsUgi.getUserName() + " using key " + KEY1);
 
-      // Fail to create an EZ
-      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
-          + keyadminUgi.getUserName() + " using key " + KEY1,
-            createEncryptionZone(keyadminUgi, KEY1, ZONE2));
-      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
-          + userUgi.getUserName() + " using key " + KEY1,
-            createEncryptionZone(userUgi, KEY1, ZONE2));
+      // Fail to create an EZ
+      assertFalse(createEncryptionZone(keyadminUgi, KEY1, ZONE2),
+          "Allowed creation of EZ " + ZONE2 + " by "
+              + keyadminUgi.getUserName() + " using key " + KEY1);
+      assertFalse(createEncryptionZone(userUgi, KEY1, ZONE2),
+          "Allowed creation of EZ " + ZONE2 + " by "
+              + userUgi.getUserName() + " using key " + KEY1);
 
-      // Create a file in the zone
-      assertTrue("Exception during creation of file " + FILE1 + " by "
-          + userUgi.getUserName(), createFile(userUgi, FILE1, TEXT));
+      // Create a file in the zone
+      assertTrue(createFile(userUgi, FILE1, TEXT),
+          "Exception during creation of file " + FILE1 + " by "
+              + userUgi.getUserName());
 
-      // Fail to create a file in the zone
-      assertFalse("Allowed creation of file " + FILE1A + " by "
-          + hdfsUgi.getUserName(), createFile(hdfsUgi, FILE1A, TEXT));
-      assertFalse("Allowed creation of file " + FILE1A + " by "
-          + keyadminUgi.getUserName(), createFile(keyadminUgi, FILE1A, TEXT));
+      // Fail to create a file in the zone
+      assertFalse(createFile(hdfsUgi, FILE1A, TEXT),
+          "Allowed creation of file " + FILE1A + " by "
+              + hdfsUgi.getUserName());
+      assertFalse(createFile(keyadminUgi, FILE1A, TEXT),
+          "Allowed creation of file " + FILE1A + " by "
+              + keyadminUgi.getUserName());
 
-      // Read a file in the zone
-      assertTrue("Exception while reading file " + FILE1 + " by "
-          + userUgi.getUserName(), compareFile(userUgi, FILE1, TEXT));
+      // Read a file in the zone
+      assertTrue(compareFile(userUgi, FILE1, TEXT),
+          "Exception while reading file " + FILE1 + " by "
+              + userUgi.getUserName());
 
-      // Fail to read a file in the zone
-      assertFalse("Allowed reading of file " + FILE1 + " by "
-          + hdfsUgi.getUserName(), compareFile(hdfsUgi, FILE1, TEXT));
-      assertFalse("Allowed reading of file " + FILE1 + " by "
-          + keyadminUgi.getUserName(), compareFile(keyadminUgi, FILE1, TEXT));
+      // Fail to read a file in the zone
+      assertFalse(compareFile(hdfsUgi, FILE1, TEXT),
+          "Allowed reading of file " + FILE1 + " by "
+              + hdfsUgi.getUserName());
+      assertFalse(compareFile(keyadminUgi, FILE1, TEXT),
+          "Allowed reading of file " + FILE1 + " by "
+              + keyadminUgi.getUserName());
 
       // Remove the zone
       fs.delete(ZONE1, true);
 
-      // Fail to remove the key
-      assertFalse("Allowed deletion of file " + FILE1 + " by "
-          + hdfsUgi.getUserName(), deleteKey(hdfsUgi, KEY1));
-      assertFalse("Allowed deletion of file " + FILE1 + " by "
-          + userUgi.getUserName(), deleteKey(userUgi, KEY1));
+      // Fail to remove the key
+      assertFalse(deleteKey(hdfsUgi, KEY1),
+          "Allowed deletion of file " + FILE1 + " by "
+              + hdfsUgi.getUserName());
+      assertFalse(deleteKey(userUgi, KEY1),
+          "Allowed deletion of file " + FILE1 + " by "
+              + userUgi.getUserName());
 
-      // Remove
-      assertTrue("Exception during deletion of file " + FILE1 + " by "
-          + keyadminUgi.getUserName(), deleteKey(keyadminUgi, KEY1));
+      // Remove
+      assertTrue(deleteKey(keyadminUgi, KEY1),
+          "Exception during deletion of file " + FILE1 + " by "
+              + keyadminUgi.getUserName());
     } finally {
       fs.delete(ZONE1, true);
       fs.delete(ZONE2, true);
@@ -495,8 +494,8 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation with correct config"
-          + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation with correct config"
+              + " using whitelist key ACLs");
     } finally {
       teardown();
     }
@@ -512,8 +511,8 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation with correct config"
-          + " using default key ACLs", createKey(realUgi, KEY2, conf));
+      assertTrue(createKey(realUgi, KEY2, conf),
+          "Exception during key creation with correct config"
+              + " using default key ACLs");
     } finally {
       teardown();
     }
@@ -531,8 +530,8 @@
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation with blacklist for CREATE",
-          createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation with blacklist for CREATE");
     } finally {
       teardown();
     }
@@ -547,8 +546,8 @@
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation without CREATE KMS ACL",
-          createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation without CREATE KMS ACL");
     } finally {
       teardown();
     }
@@ -562,8 +561,8 @@
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation without MANAGMENT key ACL",
-          createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation without MANAGMENT key ACL");
     } finally {
       teardown();
     }
@@ -581,8 +580,8 @@
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation when default key ACL should have been"
-          + " overridden by key ACL", createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation when default key ACL should have been"
+              + " overridden by key ACL");
     } finally {
       teardown();
     }
@@ -596,8 +595,8 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation with default KMS ACLs",
-          createKey(realUgi, KEY3, conf));
+      assertTrue(createKey(realUgi, KEY3, conf),
+          "Exception during key creation with default KMS ACLs");
     } finally {
       teardown();
     }
@@ -620,8 +619,8 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
     } finally {
       teardown();
     }
@@ -647,8 +646,8 @@
 
       fs.mkdirs(ZONE1);
 
-      assertTrue("Exception during zone creation with correct config using"
-          + " whitelist key ACLs", createEncryptionZone(realUgi, KEY1, ZONE1));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1),
+          "Exception during zone creation with correct config using"
+              + " whitelist key ACLs");
     } finally {
       fs.delete(ZONE1, true);
       teardown();
@@ -671,8 +670,8 @@
 
       fs.mkdirs(ZONE2);
 
-      assertTrue("Exception during zone creation with correct config using"
-          + " default key ACLs", createEncryptionZone(realUgi, KEY1, ZONE2));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE2),
+          "Exception during zone creation with correct config using"
+              + " default key ACLs");
     } finally {
       fs.delete(ZONE2, true);
       teardown();
@@ -697,9 +696,9 @@
 
       fs.mkdirs(ZONE3);
 
-      assertFalse("Allowed creation of zone when default key ACLs should have"
-          + " been overridden by key ACL",
-            createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Allowed creation of zone when default key ACLs should have"
+              + " been overridden by key ACL");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -724,8 +723,8 @@
 
       fs.mkdirs(ZONE3);
 
-      assertFalse("Allowed zone creation of zone with blacklisted GET_METADATA",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Allowed zone creation of zone with blacklisted GET_METADATA");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -750,8 +749,8 @@
 
       fs.mkdirs(ZONE3);
 
-      assertFalse("Allowed zone creation of zone with blacklisted GENERATE_EEK",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Allowed zone creation of zone with blacklisted GENERATE_EEK");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -771,8 +770,8 @@
 
       fs.mkdirs(ZONE3);
 
-      assertTrue("Exception during zone creation with default KMS ACLs",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Exception during zone creation with default KMS ACLs");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -794,8 +793,8 @@
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without GET_METADATA KMS ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without GET_METADATA KMS ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -817,8 +816,8 @@
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without GENERATE_EEK KMS ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without GENERATE_EEK KMS ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -839,8 +838,8 @@
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without READ ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without READ ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -861,8 +860,8 @@
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without GENERATE_EEK ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without GENERATE_EEK ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -896,20 +895,20 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
       fs.mkdirs(ZONE1);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE1));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1),
+          "Exception during zone creation");
       fs.mkdirs(ZONE2);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE2));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE2),
+          "Exception during zone creation");
       fs.mkdirs(ZONE3);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Exception during zone creation");
       fs.mkdirs(ZONE4);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Exception during zone creation");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
       fs.delete(ZONE2, true);
@@ -941,8 +940,8 @@
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception during file creation with correct config"
-          + " using whitelist ACL", createFile(realUgi, FILE1, TEXT));
+      assertTrue(createFile(realUgi, FILE1, TEXT),
+          "Exception during file creation with correct config"
+              + " using whitelist ACL");
     } finally {
       fs.delete(ZONE1, true);
       teardown();
@@ -963,8 +962,8 @@
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception during file creation with correct config"
-          + " using whitelist ACL", createFile(realUgi, FILE2, TEXT));
+      assertTrue(createFile(realUgi, FILE2, TEXT),
+          "Exception during file creation with correct config"
+              + " using whitelist ACL");
     } finally {
       fs.delete(ZONE2, true);
       teardown();
@@ -987,8 +986,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation when default key ACLs should have been"
-          + " overridden by key ACL", createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation when default key ACLs should have been"
+              + " overridden by key ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1014,8 +1013,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation with blacklist for GENERATE_EEK",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation with blacklist for GENERATE_EEK");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1041,8 +1040,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation with blacklist for DECRYPT_EEK",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation with blacklist for DECRYPT_EEK");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1062,8 +1061,8 @@
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception during file creation with default KMS ACLs",
-          createFile(realUgi, FILE3, TEXT));
+      assertTrue(createFile(realUgi, FILE3, TEXT),
+          "Exception during file creation with default KMS ACLs");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1086,8 +1085,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without GENERATE_EEK KMS ACL",
-          createFile(realUgi, FILE4, TEXT));
+      assertFalse(createFile(realUgi, FILE4, TEXT),
+          "Allowed file creation without GENERATE_EEK KMS ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1110,8 +1109,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without DECRYPT_EEK KMS ACL",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation without DECRYPT_EEK KMS ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1133,8 +1132,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without GENERATE_EEK key ACL",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation without GENERATE_EEK key ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1156,8 +1155,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without DECRYPT_EEK key ACL",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation without DECRYPT_EEK key ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1198,13 +1197,13 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
       fs.mkdirs(ZONE1);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE1));
-      assertTrue("Exception during file creation",
-              createFile(realUgi, FILE1, TEXT));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1),
+          "Exception during zone creation");
+      assertTrue(createFile(realUgi, FILE1, TEXT),
+          "Exception during file creation");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1229,8 +1228,8 @@
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception while reading file with correct config with"
-          + " whitelist ACLs", compareFile(realUgi, FILE1, TEXT));
+      assertTrue(compareFile(realUgi, FILE1, TEXT),
+          "Exception while reading file with correct config with"
+              + " whitelist ACLs");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1250,8 +1249,8 @@
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception while reading file with correct config"
-          + " with default ACLs", compareFile(realUgi, FILE1, TEXT));
+      assertTrue(compareFile(realUgi, FILE1, TEXT),
+          "Exception while reading file with correct config"
+              + " with default ACLs");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1273,8 +1272,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read when default key ACLs should have been"
-          + " overridden by key ACL", compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read when default key ACLs should have been"
+              + " overridden by key ACL");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1296,8 +1295,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read with blacklist for DECRYPT_EEK",
-          compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read with blacklist for DECRYPT_EEK");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1315,8 +1314,8 @@
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception while reading file with default KMS ACLs",
-          compareFile(realUgi, FILE1, TEXT));
+      assertTrue(compareFile(realUgi, FILE1, TEXT),
+          "Exception while reading file with default KMS ACLs");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1335,8 +1334,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read without DECRYPT_EEK KMS ACL",
-          compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read without DECRYPT_EEK KMS ACL");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1351,8 +1350,8 @@
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read without DECRYPT_EEK key ACL",
-          compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read without DECRYPT_EEK key ACL");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1379,12 +1378,12 @@
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY2, conf));
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY3, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
+      assertTrue(createKey(realUgi, KEY2, conf),
+          "Exception during key creation");
+      assertTrue(createKey(realUgi, KEY3, conf),
+          "Exception during key creation");
     } finally {
       teardown();
     }
@@ -1405,8 +1404,8 @@
     try {
       setup(conf, false);
 
-      assertTrue("Exception during key deletion with correct config"
-          + " using whitelist key ACLs", deleteKey(realUgi, KEY1));
+      assertTrue(deleteKey(realUgi, KEY1),
+          "Exception during key deletion with correct config"
+              + " using whitelist key ACLs");
     } finally {
       teardown();
     }
@@ -1422,8 +1421,8 @@
     try {
       setup(conf, false);
 
-      assertTrue("Exception during key deletion with correct config"
-          + " using default key ACLs", deleteKey(realUgi, KEY2));
+      assertTrue(deleteKey(realUgi, KEY2),
+          "Exception during key deletion with correct config"
+              + " using default key ACLs");
     } finally {
       teardown();
     }
@@ -1441,8 +1440,8 @@
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion with blacklist for DELETE",
-          deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion with blacklist for DELETE");
     } finally {
       teardown();
     }
@@ -1457,8 +1456,8 @@
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion without DELETE KMS ACL",
-          deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion without DELETE KMS ACL");
     } finally {
       teardown();
     }
@@ -1473,8 +1472,8 @@
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion without MANAGMENT key ACL",
-          deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion without MANAGMENT key ACL");
     } finally {
       teardown();
     }
@@ -1492,8 +1491,8 @@
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion when default key ACL should have been"
-          + " overridden by key ACL", deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion when default key ACL should have been"
+              + " overridden by key ACL");
     } finally {
       teardown();
     }
@@ -1507,8 +1506,8 @@
     try {
       setup(conf, false);
 
-      assertTrue("Exception during key deletion with default KMS ACLs",
-          deleteKey(realUgi, KEY3));
+      assertTrue(deleteKey(realUgi, KEY3),
+          "Exception during key deletion with default KMS ACLs");
     } finally {
       teardown();
     }
@@ -1596,8 +1595,8 @@
         FSDataInputStream din =  cluster.getFileSystem().open(file);
         BufferedReader in = new BufferedReader(new InputStreamReader(din));
 
-        assertEquals("The text read does not match the text written",
-            text, in.readLine());
+        assertEquals(text, in.readLine(),
+            "The text read does not match the text written");
       }
     });
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
index eedbdb9..5894a38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
@@ -26,10 +26,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
 
 /**
  * Test cases for trying to append to a file with a different
@@ -44,7 +44,7 @@
   private static FileSystem fs; 
   
 
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
@@ -55,7 +55,7 @@
     fs = cluster.getFileSystem();
   }
   
-  @AfterClass
+  @AfterAll
   public static void teardown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -68,7 +68,7 @@
    * difficulties in doing so.
    */
   @Test
-  @Ignore("this is not implemented! See HDFS-2130")
+  @Disabled("this is not implemented! See HDFS-2130")
   public void testSwitchChunkSize() throws IOException {
     FileSystem fsWithSmallChunk = createFsWithChecksum("CRC32", 512);
     FileSystem fsWithBigChunk = createFsWithChecksum("CRC32", 1024);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index b4e9550..c68f05a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -45,11 +45,10 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.slf4j.event.Level;
 
@@ -78,7 +77,7 @@
   static MiniDFSCluster cluster;
   static DistributedFileSystem dfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void startUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -95,7 +94,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if(dfs != null) {
       dfs.close();
@@ -167,7 +166,7 @@
       {
         //copy all local files to a sub dir to simulate snapshot. 
         final File subDir = new File(localDir, snapshot);
-        Assert.assertFalse(subDir.exists());
+        Assertions.assertFalse(subDir.exists());
         subDir.mkdir();
 
         for(File f : localDir.listFiles(FILE_ONLY)) {
@@ -185,12 +184,12 @@
           .append(snapshot);
 
       final File subDir = new File(localDir, snapshot);
-      Assert.assertTrue(subDir.exists());
+      Assertions.assertTrue(subDir.exists());
       
       final File[] localFiles = subDir.listFiles(FILE_ONLY);
       final Path p = snapshotPaths.get(snapshot);
       final FileStatus[] statuses = dfs.listStatus(p);
-      Assert.assertEquals(localFiles.length, statuses.length);
+      Assertions.assertEquals(localFiles.length, statuses.length);
       b.append(p).append(" vs ").append(subDir).append(", ")
        .append(statuses.length).append(" entries");
       
@@ -374,8 +373,8 @@
 
     static int checkLength(Path file, File localFile) throws IOException {
       final long length = dfs.getFileStatus(file).getLen();
-      Assert.assertEquals(localFile.length(), length);
-      Assert.assertTrue(length <= Integer.MAX_VALUE);
+      Assertions.assertEquals(localFile.length(), length);
+      Assertions.assertTrue(length <= Integer.MAX_VALUE);
       return (int)length;
     }
     
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
index 200fab6..b77cc59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -27,9 +27,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestApplyingStoragePolicy {
   private static final short REPL = 1;
@@ -39,7 +39,7 @@
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
@@ -47,7 +47,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void clusterShutdown() throws IOException{
     if(fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
index 3191fbd..5652662 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
@@ -17,8 +17,8 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test ensures that the balancer bandwidth is dynamically adjusted
@@ -131,11 +131,11 @@
     try {
       System.setOut(outStream);
       int exitCode = admin.run(args);
-      assertEquals("DFSAdmin should return 0", 0, exitCode);
+      assertEquals(0, exitCode, "DFSAdmin should return 0");
       String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
           + " bytes per second.";
       String strOut = new String(outContent.toByteArray(), UTF8);
-      assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
+      assertTrue(strOut.contains(bandwidthOutMsg), "Wrong balancer bandwidth!");
     } finally {
       System.setOut(initialStdOut);
     }
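The assertion rewrites above follow the one mechanical rule of this migration: org.junit.Assert takes the optional failure message as the first argument, while org.junit.jupiter.api.Assertions takes it as the last one; the expected/actual order is unchanged. A small self-contained sketch of the before/after shape (values are illustrative, not from this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    public class MessageOrderExampleTest {
      @Test
      public void messageMovesToLastArgument() {
        int exitCode = 0;
        String strOut = "Balancer bandwidth is 12345 bytes per second.";
        // JUnit 4 form: assertEquals("DFSAdmin should return 0", 0, exitCode);
        assertEquals(0, exitCode, "DFSAdmin should return 0");
        // JUnit 4 form: assertTrue("Wrong balancer bandwidth!", strOut.contains("12345"));
        assertTrue(strOut.contains("12345"), "Wrong balancer bandwidth!");
      }
    }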
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
index 11bfa2f..d057631 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
@@ -33,10 +33,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
 import org.hamcrest.core.StringContains;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 import java.io.FileNotFoundException;
@@ -46,9 +46,7 @@
 import java.util.List;
 import java.util.Map;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests for the batched listing API.
@@ -85,15 +83,15 @@
   private static void assertSubDirEquals(int i, int j, Path p) {
     assertTrue(p.toString().startsWith("hdfs://"));
     Path expected = getSubDirName(i, j);
-    assertEquals("Unexpected subdir name",
-        expected.toString(), p.toUri().getPath());
+    assertEquals(expected.toString(), p.toUri().getPath(),
+        "Unexpected subdir name");
   }
 
   private static void assertFileEquals(int i, int j, int k, Path p) {
     assertTrue(p.toString().startsWith("hdfs://"));
     Path expected = getFileName(i, j, k);
-    assertEquals("Unexpected file name",
-        expected.toString(), p.toUri().getPath());
+    assertEquals(expected.toString(), p.toUri().getPath(),
+        "Unexpected file name");
   }
 
   private static void loadData() throws Exception {
@@ -119,7 +117,7 @@
     dfs.setPermission(INACCESSIBLE_DIR_PATH, new FsPermission(0000));
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void beforeClass() throws Exception {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 7);
@@ -132,7 +130,7 @@
     loadData();
   }
 
-  @AfterClass
+  @AfterAll
   public static void afterClass() {
     if (cluster != null) {
       cluster.shutdown();
@@ -233,8 +231,8 @@
     dfs.setWorkingDirectory(new Path("/dir0"));
     List<Path> paths = Lists.newArrayList(new Path("."));
     List<FileStatus> statuses = getStatuses(paths);
-    assertEquals("Wrong number of items",
-        SECOND_LEVEL_DIRS, statuses.size());
+    assertEquals(SECOND_LEVEL_DIRS, statuses.size(),
+        "Wrong number of items");
     for (int i = 0; i < SECOND_LEVEL_DIRS; i++) {
       FileStatus stat = statuses.get(i);
       assertSubDirEquals(0, i, stat.getPath());
@@ -246,8 +244,8 @@
     dfs.setWorkingDirectory(new Path("/dir0"));
     List<Path> paths = Lists.newArrayList(new Path("subdir0"));
     List<FileStatus> statuses = getStatuses(paths);
-    assertEquals("Wrong number of items",
-        FILES_PER_DIR, statuses.size());
+    assertEquals(FILES_PER_DIR, statuses.size(),
+        "Wrong number of items");
     for (int i = 0; i < FILES_PER_DIR; i++) {
       FileStatus stat = statuses.get(i);
       assertFileEquals(0, 0, i, stat.getPath());
@@ -256,9 +254,9 @@
 
   @Test
   public void testDFSHasCapability() throws Throwable {
-    assertTrue("FS does not declare PathCapability support",
-        dfs.hasPathCapability(new Path("/"),
-            CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING));
+    assertTrue(dfs.hasPathCapability(new Path("/"),
+        CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING),
+        "FS does not declare PathCapability support");
   }
 
   private void listFilesInternal(int numFiles) throws Exception {
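TestBatchedListDirectories still imports org.junit.Rule and org.junit.rules.ExpectedException, which are JUnit 4 constructs the Jupiter engine does not honour; the usual Jupiter replacement is Assertions.assertThrows. A sketch of that pattern with illustrative names, not taken from this patch:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.FileNotFoundException;
    import org.junit.jupiter.api.Test;

    public class ExpectedExceptionExampleTest {
      @Test
      public void missingPathIsRejected() {
        // JUnit 4 used: @Rule ExpectedException thrown = ExpectedException.none();
        //               thrown.expect(FileNotFoundException.class);
        FileNotFoundException e = assertThrows(FileNotFoundException.class,
            () -> { throw new FileNotFoundException("/no/such/path"); });
        // assertThrows returns the caught exception, so its message can be checked.
        assertTrue(e.getMessage().contains("/no/such/path"));
      }
    }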
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
index c679f6c..0a7b3b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -31,7 +31,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestBlockMissingException {
   final static Logger LOG =
@@ -116,6 +116,6 @@
       gotException = true;
     }
     stm.close();
-    assertTrue("Expected BlockMissingException ", gotException);
+    assertTrue(gotException, "Expected BlockMissingException ");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 54c3eda..8290f57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -47,9 +48,8 @@
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Sets;
-import org.junit.Assert;
-import static org.junit.Assert.fail;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /** Test {@link BlockStoragePolicy} */
 public class TestBlockStoragePolicy {
@@ -160,10 +160,10 @@
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i); 
       if (policy != null) {
         final String s = policy.toString();
-        Assert.assertEquals(expectedPolicyStrings.get(i), s);
+        Assertions.assertEquals(expectedPolicyStrings.get(i), s);
       }
     }
-    Assert.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
+    Assertions.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
     
     // check Cold policy
     final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
@@ -260,11 +260,11 @@
 
   static void assertStorageType(List<StorageType> computed, short replication,
       StorageType... answers) {
-    Assert.assertEquals(replication, computed.size());
+    Assertions.assertEquals(replication, computed.size());
     final StorageType last = answers[answers.length - 1];
     for(int i = 0; i < computed.size(); i++) {
       final StorageType expected = i < answers.length? answers[i]: last;
-      Assert.assertEquals(expected, computed.get(i));
+      Assertions.assertEquals(expected, computed.get(i));
     }
   }
 
@@ -272,27 +272,27 @@
       StorageType noneExpected, StorageType archiveExpected,
       StorageType diskExpected, StorageType ssdExpected,
       StorageType disk_archiveExpected, StorageType nvdimmExpected) {
-    Assert.assertEquals(noneExpected, policy.getCreationFallback(none));
-    Assert.assertEquals(archiveExpected, policy.getCreationFallback(archive));
-    Assert.assertEquals(diskExpected, policy.getCreationFallback(disk));
-    Assert.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
-    Assert.assertEquals(nvdimmExpected, policy.getCreationFallback(nvdimm));
-    Assert.assertEquals(disk_archiveExpected,
+    Assertions.assertEquals(noneExpected, policy.getCreationFallback(none));
+    Assertions.assertEquals(archiveExpected, policy.getCreationFallback(archive));
+    Assertions.assertEquals(diskExpected, policy.getCreationFallback(disk));
+    Assertions.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
+    Assertions.assertEquals(nvdimmExpected, policy.getCreationFallback(nvdimm));
+    Assertions.assertEquals(disk_archiveExpected,
         policy.getCreationFallback(disk_archive));
-    Assert.assertEquals(null, policy.getCreationFallback(all));
+    Assertions.assertEquals(null, policy.getCreationFallback(all));
   }
 
   static void assertReplicationFallback(BlockStoragePolicy policy,
       StorageType noneExpected, StorageType archiveExpected,
       StorageType diskExpected, StorageType ssdExpected,
       StorageType nvdimmExpected) {
-    Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
-    Assert
+    Assertions.assertEquals(noneExpected, policy.getReplicationFallback(none));
+    Assertions
         .assertEquals(archiveExpected, policy.getReplicationFallback(archive));
-    Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
-    Assert.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
-    Assert.assertEquals(nvdimmExpected, policy.getReplicationFallback(nvdimm));
-    Assert.assertEquals(null, policy.getReplicationFallback(all));
+    Assertions.assertEquals(diskExpected, policy.getReplicationFallback(disk));
+    Assertions.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
+    Assertions.assertEquals(nvdimmExpected, policy.getReplicationFallback(nvdimm));
+    Assertions.assertEquals(null, policy.getReplicationFallback(all));
   }
 
   private static interface CheckChooseStorageTypes {
@@ -879,7 +879,7 @@
   static void assertStorageTypes(StorageType[] computed, StorageType... expected) {
     Arrays.sort(expected);
     Arrays.sort(computed);
-    Assert.assertArrayEquals(expected, computed);
+    Assertions.assertArrayEquals(expected, computed);
   }
 
   @Test
@@ -924,9 +924,9 @@
   }
 
   private void checkDirectoryListing(HdfsFileStatus[] stats, byte... policies) {
-    Assert.assertEquals(stats.length, policies.length);
+    Assertions.assertEquals(stats.length, policies.length);
     for (int i = 0; i < stats.length; i++) {
-      Assert.assertEquals(stats[i].getStoragePolicy(), policies[i]);
+      Assertions.assertEquals(stats[i].getStoragePolicy(), policies[i]);
     }
   }
 
@@ -949,7 +949,7 @@
       final String invalidPolicyName = "INVALID-POLICY";
       try {
         fs.setStoragePolicy(fooFile, invalidPolicyName);
-        Assert.fail("Should throw a HadoopIllegalArgumentException");
+        Assertions.fail("Should throw a HadoopIllegalArgumentException");
       } catch (RemoteException e) {
         GenericTestUtils.assertExceptionContains(invalidPolicyName, e);
       }
@@ -967,14 +967,14 @@
       final Path invalidPath = new Path("/invalidPath");
       try {
         fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
-        Assert.fail("Should throw a FileNotFoundException");
+        Assertions.fail("Should throw a FileNotFoundException");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
       }
 
       try {
         fs.getStoragePolicy(invalidPath);
-        Assert.fail("Should throw a FileNotFoundException");
+        Assertions.fail("Should throw a FileNotFoundException");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
       }
@@ -982,15 +982,15 @@
       fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
       fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
       fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
-      Assert.assertEquals("File storage policy should be COLD",
-          HdfsConstants.COLD_STORAGE_POLICY_NAME,
-          fs.getStoragePolicy(fooFile).getName());
-      Assert.assertEquals("File storage policy should be WARM",
-          HdfsConstants.WARM_STORAGE_POLICY_NAME,
-          fs.getStoragePolicy(barDir).getName());
-      Assert.assertEquals("File storage policy should be HOT",
-          HdfsConstants.HOT_STORAGE_POLICY_NAME,
-          fs.getStoragePolicy(barFile2).getName());
+      Assertions.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
+          fs.getStoragePolicy(fooFile).getName(),
+          "File storage policy should be COLD");
+      Assertions.assertEquals(HdfsConstants.WARM_STORAGE_POLICY_NAME,
+          fs.getStoragePolicy(barDir).getName(),
+          "File storage policy should be WARM");
+      Assertions.assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME,
+          fs.getStoragePolicy(barFile2).getName(),
+          "File storage policy should be HOT");
 
       dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -1040,8 +1040,8 @@
           HdfsConstants.COLD_STORAGE_POLICY_NAME);
       String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
           .getName();
-      Assert.assertEquals("File storage policy should be COLD",
-          HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName);
+      Assertions.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
+          policyName, "File storage policy should be COLD");
     } finally {
       cluster.shutdown();
     }
@@ -1140,14 +1140,14 @@
     List<StorageType> typeList = Lists.newArrayList();
     Collections.addAll(typeList, types);
     LocatedBlocks lbs = status.getLocatedBlocks();
-    Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
+    Assertions.assertEquals(blockNum, lbs.getLocatedBlocks().size());
     for (LocatedBlock lb : lbs.getLocatedBlocks()) {
-      Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
+      Assertions.assertEquals(replicaNum, lb.getStorageTypes().length);
       for (StorageType type : lb.getStorageTypes()) {
-        Assert.assertTrue(typeList.remove(type));
+        Assertions.assertTrue(typeList.remove(type));
       }
     }
-    Assert.assertTrue(typeList.isEmpty());
+    Assertions.assertTrue(typeList.isEmpty());
   }
 
   private void testChangeFileRep(String policyName, byte policyId,
@@ -1285,12 +1285,12 @@
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
         new HashSet<Node>(), 0, policy1, null);
     System.out.println(Arrays.asList(targets));
-    Assert.assertEquals(3, targets.length);
+    Assertions.assertEquals(3, targets.length);
     targets = replicator.chooseTarget("/foo", 3,
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
         new HashSet<Node>(), 0, policy2, null);
     System.out.println(Arrays.asList(targets));
-    Assert.assertEquals(3, targets.length);
+    Assertions.assertEquals(3, targets.length);
   }
 
   @Test
@@ -1332,9 +1332,9 @@
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
         new HashSet<Node>(), 0, policy, null);
     System.out.println(policy.getName() + ": " + Arrays.asList(targets));
-    Assert.assertEquals(2, targets.length);
-    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
-    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+    Assertions.assertEquals(2, targets.length);
+    Assertions.assertEquals(StorageType.SSD, targets[0].getStorageType());
+    Assertions.assertEquals(StorageType.DISK, targets[1].getStorageType());
   }
 
   @Test
@@ -1360,17 +1360,17 @@
       // 4. Set Dir policy
       fs.setStoragePolicy(dir, "HOT");
       HdfsFileStatus status = fs.getClient().getFileInfo(file);
-      // 5. get file policy, it should be parent policy.
-      Assert
-          .assertTrue("File storage policy should be HOT",
-              status.getStoragePolicy() == HOT);
+      // 5. get file policy, it should be parent policy.
+      Assertions.assertTrue(
+          status.getStoragePolicy() == HOT,
+          "File storage policy should be HOT");
       // 6. restart NameNode for reloading edits logs.
       cluster.restartNameNode(true);
       // 7. get file policy, it should be parent policy.
       status = fs.getClient().getFileInfo(file);
-      Assert
-          .assertTrue("File storage policy should be HOT",
-              status.getStoragePolicy() == HOT);
+      Assertions.assertTrue(
+          status.getStoragePolicy() == HOT,
+          "File storage policy should be HOT");
 
     } finally {
       cluster.shutdown();
@@ -1408,8 +1408,8 @@
       }
 
       // Ensure that we got the same set of policies in both cases.
-      Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
-      Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
+      Assertions.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
+      Assertions.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
     } finally {
       cluster.shutdown();
     }
@@ -1428,21 +1428,21 @@
 
     {
       final Iterator<StorageType> i = map.keySet().iterator();
-      Assert.assertEquals(StorageType.RAM_DISK, i.next());
-      Assert.assertEquals(StorageType.SSD, i.next());
-      Assert.assertEquals(StorageType.DISK, i.next());
-      Assert.assertEquals(StorageType.ARCHIVE, i.next());
-      Assert.assertEquals(StorageType.NVDIMM, i.next());
+      Assertions.assertEquals(StorageType.RAM_DISK, i.next());
+      Assertions.assertEquals(StorageType.SSD, i.next());
+      Assertions.assertEquals(StorageType.DISK, i.next());
+      Assertions.assertEquals(StorageType.ARCHIVE, i.next());
+      Assertions.assertEquals(StorageType.NVDIMM, i.next());
     }
 
     {
       final Iterator<Map.Entry<StorageType, Integer>> i
           = map.entrySet().iterator();
-      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
-      Assert.assertEquals(StorageType.SSD, i.next().getKey());
-      Assert.assertEquals(StorageType.DISK, i.next().getKey());
-      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
-      Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
+      Assertions.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assertions.assertEquals(StorageType.SSD, i.next().getKey());
+      Assertions.assertEquals(StorageType.DISK, i.next().getKey());
+      Assertions.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+      Assertions.assertEquals(StorageType.NVDIMM, i.next().getKey());
     }
   }
 
@@ -1600,7 +1600,7 @@
   public void testCreateDefaultPoliciesFromConf() {
     BlockStoragePolicySuite suite =
         BlockStoragePolicySuite.createDefaultSuite();
-    Assert.assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
+    Assertions.assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
         suite.getDefaultPolicy().getId());
 
     Configuration newConf = new Configuration();
@@ -1608,7 +1608,7 @@
         HdfsConstants.StoragePolicy.ONE_SSD);
     BlockStoragePolicySuite suiteConf =
         BlockStoragePolicySuite.createDefaultSuite(newConf);
-    Assert.assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
+    Assertions.assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
         suiteConf.getDefaultPolicy().getId());
   }
 
@@ -1627,7 +1627,7 @@
       DFSTestUtil.createFile(newfs, fooFile, 0, REPLICATION, 0L);
 
       String policy = newfs.getStoragePolicy(fooFile).getName();
-      Assert.assertEquals(HdfsConstants.StoragePolicy.WARM.name(), policy);
+      Assertions.assertEquals(HdfsConstants.StoragePolicy.WARM.name(), policy);
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
index c224c49..1b5431d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
@@ -36,14 +36,15 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.security.TestPermission;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
@@ -77,7 +78,7 @@
     this.qopValue = qopValue;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = createSecureConfig(this.configKey);
     conf.set(DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY, "12000");
@@ -109,7 +110,7 @@
     dfs = (DistributedFileSystem) FileSystem.get(uriAuxiliary, conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index 95d6825..574474d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,8 +35,8 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests DatanodeDescriptor.getBlocksScheduled() at the
@@ -47,7 +47,7 @@
   MiniDFSCluster cluster = null;
   FileSystem fs = null;
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -104,8 +104,8 @@
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
     datanodeManager.fetchDatanodes(dnList, dnList, false);
     for (DatanodeDescriptor descriptor : dnList) {
-      assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
-          0, descriptor.getBlocksScheduled());
+      assertEquals(0, descriptor.getBlocksScheduled(),
+          "Blocks scheduled should be 0 for " + descriptor.getName());
     }
 
     cluster.getDataNodes().get(0).shutdown();
@@ -120,21 +120,21 @@
 
     DatanodeDescriptor abandonedDn = datanodeManager.getDatanode(cluster
         .getDataNodes().get(0).getDatanodeId());
-    assertEquals("for the abandoned dn scheduled counts should be 0", 0,
-        abandonedDn.getBlocksScheduled());
+    assertEquals(0, abandonedDn.getBlocksScheduled(),
+        "for the abandoned dn scheduled counts should be 0");
 
     for (DatanodeDescriptor descriptor : dnList) {
       if (descriptor.equals(abandonedDn)) {
         continue;
       }
-      assertEquals("Blocks scheduled should be 1 for " + descriptor.getName(),
-          1, descriptor.getBlocksScheduled());
+      assertEquals(1, descriptor.getBlocksScheduled(),
+          "Blocks scheduled should be 1 for " + descriptor.getName());
     }
     // close the file and the counter should go to zero.
     out.close();
     for (DatanodeDescriptor descriptor : dnList) {
-      assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
-          0, descriptor.getBlocksScheduled());
+      assertEquals(0, descriptor.getBlocksScheduled(),
+          "Blocks scheduled should be 0 for " + descriptor.getName());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
index 1c7f150..6b51a52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
@@ -28,14 +28,11 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * This class tests the DFS positional read functionality on a single node
@@ -55,7 +52,7 @@
   private static final int BLOCK_SIZE = 4096;
   private static final int FILE_SIZE = 12 * BLOCK_SIZE;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     // Setup the cluster with a small block size so we can create small files
     // that span multiple blocks
@@ -278,7 +275,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     try {
       fs.delete(testFile, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 3b766f9..5db76b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -31,6 +30,8 @@
 
 import java.util.function.Supplier;
 
+import static org.junit.jupiter.api.Assertions.assertFalse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -54,8 +55,8 @@
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -84,9 +85,9 @@
       // test getNewStampAndToken on a finalized block
       try {
         namenode.updateBlockForPipeline(firstBlock, "");
-        Assert.fail("Can not get a new GS from a finalized block");
+        Assertions.fail("Can not get a new GS from a finalized block");
       } catch (IOException e) {
-        Assert.assertTrue(e.getMessage().contains(
+        Assertions.assertTrue(e.getMessage().contains(
             "not " + BlockUCState.UNDER_CONSTRUCTION));
       }
       
@@ -96,9 +97,9 @@
         ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
             newBlockId, 0, firstBlock.getGenerationStamp());
         namenode.updateBlockForPipeline(newBlock, "");
-        Assert.fail("Cannot get a new GS from a non-existent block");
+        Assertions.fail("Cannot get a new GS from a non-existent block");
       } catch (IOException e) {
-        Assert.assertTrue(e.getMessage().contains("does not exist"));
+        Assertions.assertTrue(e.getMessage().contains("does not exist"));
       }
 
       
@@ -122,17 +123,17 @@
         DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
         try {
           namenode.updateBlockForPipeline(firstBlock, "test" + dfs.clientName);
-          Assert.fail("Cannot get a new GS for a non lease holder");
+          Assertions.fail("Cannot get a new GS for a non lease holder");
         } catch (LeaseExpiredException e) {
-          Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+          Assertions.assertTrue(e.getMessage().startsWith("Lease mismatch"));
         }
 
         // test null lease holder
         try {
           namenode.updateBlockForPipeline(firstBlock, null);
-          Assert.fail("Cannot get a new GS for a null lease holder");
+          Assertions.fail("Cannot get a new GS for a null lease holder");
         } catch (LeaseExpiredException e) {
-          Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+          Assertions.assertTrue(e.getMessage().startsWith("Lease mismatch"));
         }
 
         // test getNewStampAndToken on a rbw block
@@ -177,7 +178,7 @@
         // Test will fail with BlockMissingException if NN does not update the
         // replica state based on the latest report.
       } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
-        Assert.fail("Block is missing because the file was closed with"
+        Assertions.fail("Block is missing because the file was closed with"
             + " corrupt replicas.");
       }
     } finally {
@@ -239,7 +240,7 @@
           contains = true;
         }
       }
-      Assert.assertTrue(contains);
+      Assertions.assertTrue(contains);
     } finally {
       DataNodeFaultInjector.set(oldDnInjector);
       if (cluster != null) {
@@ -322,7 +323,7 @@
       final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
       // issue shutdown to the datanode.
       final String[] args1 = {"-shutdownDatanode", dnAddr, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      Assertions.assertEquals(0, dfsadmin.run(args1));
       // Wait long enough to receive an OOB ack before closing the file.
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
@@ -358,23 +359,23 @@
       // get nodes in the pipeline
       DFSOutputStream dfsOut = (DFSOutputStream)out.getWrappedStream();
       DatanodeInfo[] nodes = dfsOut.getPipeline();
-      Assert.assertEquals(2, nodes.length);
+      Assertions.assertEquals(2, nodes.length);
       String dnAddr = nodes[1].getIpcAddr(false);
 
       // evict the writer from the second datanode and wait until
       // the pipeline is rebuilt.
       DFSAdmin dfsadmin = new DFSAdmin(conf);
       final String[] args1 = {"-evictWriters", dnAddr };
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      Assertions.assertEquals(0, dfsadmin.run(args1));
       out.write(0x31);
       out.hflush();
 
       // get the new pipline and check the node is not in there.
       nodes = dfsOut.getPipeline();
       try {
-        Assert.assertTrue(nodes.length > 0 );
+        Assertions.assertTrue(nodes.length > 0 );
         for (int i = 0; i < nodes.length; i++) {
-          Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
+          Assertions.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
         }
       } finally {
         out.close();
@@ -410,7 +411,7 @@
       final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
       // issue shutdown to the datanode.
       final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      Assertions.assertEquals(0, dfsadmin.run(args1));
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
       // This should succeed without restarting the node. The restart will
@@ -427,7 +428,7 @@
       final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
       // issue shutdown to the datanode.
       final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args2));
+      Assertions.assertEquals(0, dfsadmin.run(args2));
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
       try {
@@ -480,8 +481,8 @@
           return out.getBlock().getGenerationStamp() > oldGs;
         }
       }, 100, 10000);
-      Assert.assertEquals("The pipeline recovery count shouldn't increase",
-          0, out.getStreamer().getPipelineRecoveryCount());
+      Assertions.assertEquals(0, out.getStreamer().getPipelineRecoveryCount(),
+          "The pipeline recovery count shouldn't increase");
       out.write(1);
       out.close();
       // Ensure that subsequent closes are idempotent and do not throw errors
@@ -539,7 +540,7 @@
       Thread.sleep(1000);
       DatanodeInfo[] pipeline = out.getPipeline();
       for (DatanodeInfo node : pipeline) {
-        assertFalse("Write should be going on", failed.get());
+        assertFalse(failed.get(), "Write should be going on");
         ArrayList<DataNode> dataNodes = cluster.getDataNodes();
         int indexToShutdown = 0;
         for (int i = 0; i < dataNodes.size(); i++) {
@@ -564,15 +565,15 @@
             return out.getBlock().getGenerationStamp() > oldGs;
           }
         }, 100, 10000);
-        Assert.assertEquals("The pipeline recovery count shouldn't increase", 0,
-            out.getStreamer().getPipelineRecoveryCount());
+        Assertions.assertEquals(0, out.getStreamer().getPipelineRecoveryCount(),
+            "The pipeline recovery count shouldn't increase");
       }
-      assertFalse("Write should be going on", failed.get());
+      assertFalse(failed.get(), "Write should be going on");
       running.set(false);
       t.join();
       out.write("testagain".getBytes());
-      assertTrue("There should be atleast 2 nodes in pipeline still", out
-          .getPipeline().length >= 2);
+      assertTrue(out.getPipeline().length >= 2,
+          "There should be at least 2 nodes in pipeline still");
       out.close();
     } finally {
       DFSClientFaultInjector.set(old);
@@ -723,7 +724,7 @@
         o.hflush();
       }
 
-      assertTrue("Expected a failure in the pipeline", failed.get());
+      assertTrue(failed.get(), "Expected a failure in the pipeline");
       DatanodeInfo[] newNodes = dfsO.getStreamer().getNodes();
       o.close();
       // Trigger block report to NN
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 2f5aa96..723d3b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -41,10 +41,10 @@
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Class is used to test client reporting corrupted block replica to name node.
@@ -67,7 +67,7 @@
 
   Random rand = new Random();
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); 
@@ -80,7 +80,7 @@
     buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (dfs != null) {
       dfs.close();
@@ -211,7 +211,7 @@
     // Locate the file blocks by asking name node
     final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
         .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
-    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
+    Assertions.assertEquals(repl, locatedblocks.get(0).getLocations().length);
     // The file only has one block
     LocatedBlock lblock = locatedblocks.get(0);
     DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -236,7 +236,7 @@
     final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
         .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
     final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
-    Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
+    Assertions.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
   }
 
   /**
@@ -250,7 +250,7 @@
         filePath.toUri().getPath(), 0, Long.MAX_VALUE);
     // we expect only the first block of the file is used for this test
     LocatedBlock firstLocatedBlock = lBlocks.get(0);
-    Assert.assertEquals(expectedReplicas,
+    Assertions.assertEquals(expectedReplicas,
         firstLocatedBlock.getLocations().length);
   }
 
@@ -300,23 +300,23 @@
     // Make sure filesystem is in healthy state
     String outStr = runFsck(conf, 0, true, "/");
     LOG.info(outStr);
-    Assert.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    Assertions.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     if (!expected.equals("")) {
-      Assert.assertTrue(outStr.contains(expected));
+      Assertions.assertTrue(outStr.contains(expected));
     }
   }
 
   private static void verifyFsckBlockCorrupted() throws Exception {
     String outStr = runFsck(conf, 1, true, "/");
     LOG.info(outStr);
-    Assert.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    Assertions.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
   }
   
   private static void testFsckListCorruptFilesBlocks(Path filePath, int errorCode) throws Exception{
     String outStr = runFsck(conf, errorCode, true, filePath.toString(), "-list-corruptfileblocks");
     LOG.info("fsck -list-corruptfileblocks out: " + outStr);
     if (errorCode != 0) {
-      Assert.assertTrue(outStr.contains("CORRUPT blocks"));
+      Assertions.assertTrue(outStr.contains("CORRUPT blocks"));
     }
   }
 
@@ -326,7 +326,7 @@
     PrintStream out = new PrintStream(bStream, true);
     int errCode = ToolRunner.run(new DFSck(conf, out), path);
     if (checkErrorCode)
-      Assert.assertEquals(expectedErrCode, errCode);
+      Assertions.assertEquals(expectedErrCode, errCode);
     return bStream.toString();
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
index 94f3612..db35a6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -26,7 +26,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestClose {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
index 85a4d19..a271094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -27,8 +27,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the client connection caching in a single node
@@ -52,7 +52,7 @@
                      int length,
                      byte[] authenticData)
       throws IOException {
-    Assert.assertTrue("Test buffer too small", buffer.length >= offset + length);
+    Assertions.assertTrue(buffer.length >= offset + length, "Test buffer too small");
 
     if (pos >= 0)
       in.seek(pos);
@@ -62,7 +62,7 @@
 
     while (length > 0) {
       int cnt = in.read(buffer, offset, length);
-      Assert.assertTrue("Error in read", cnt > 0);
+      Assertions.assertTrue(cnt > 0, "Error in read");
       offset += cnt;
       length -= cnt;
     }
@@ -71,9 +71,9 @@
     for (int i = 0; i < length; ++i) {
       byte actual = buffer[i];
       byte expect = authenticData[(int)pos + i];
-      assertEquals("Read data mismatch at file offset " + (pos + i) +
-                   ". Expects " + expect + "; got " + actual,
-                   actual, expect);
+      assertEquals(actual, expect,
+          "Read data mismatch at file offset " + (pos + i) +
+          ". Expects " + expect + "; got " + actual);
     }
   }
 
@@ -116,7 +116,7 @@
 
     in.close();
     client.close();
-    Assert.assertEquals(1,
+    Assertions.assertEquals(1,
         ClientContext.getFromConf(configuration).getPeerCache().size());
   }
 }
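Several of the rewritten assertions, such as the per-offset message above, build their failure message eagerly with string concatenation. Jupiter also accepts a Supplier<String> in the message position, so the string is only built when the assertion fails; a sketch of that option (names and values are illustrative, not from this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    public class LazyMessageExampleTest {
      @Test
      public void lazyFailureMessage() {
        byte actual = 7;
        byte expect = 7;
        long pos = 42;
        // The lambda is evaluated only if the assertion fails, so the
        // concatenation cost is not paid on the happy path.
        assertEquals(expect, actual,
            () -> "Read data mismatch at file offset " + pos
                + ". Expects " + expect + "; got " + actual);
      }
    }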
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 917f0db..2ba7ad6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -36,8 +34,8 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -75,7 +73,7 @@
 
   private DFSClientFaultInjector faultInjector;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     faultInjector = Mockito.mock(DFSClientFaultInjector.class);
     DFSClientFaultInjector.set(faultInjector);
@@ -174,7 +172,7 @@
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       List<ReplicaInfo> replicas =
           dn.getFSDataset().getFinalizedBlocks(bpid);
-      assertTrue("Replicas do not exist", !replicas.isEmpty());
+      assertTrue(!replicas.isEmpty(), "Replicas do not exist");
 
       for (int idx = 0; idx < replicas.size(); idx++) {
         ReplicaInfo replica = replicas.get(idx);
@@ -192,12 +190,12 @@
         }
       }
 
-      //
-      // Only one replica is possibly corrupted. The other replica should still
-      // be good. Verify.
-      //
-      assertTrue("Corrupted replicas not handled properly.",
-                 util.checkFiles(fs, "/srcdat"));
+      //
+      // Only one replica is possibly corrupted. The other replica should still
+      // be good. Verify.
+      //
+      assertTrue(util.checkFiles(fs, "/srcdat"),
+          "Corrupted replicas not handled properly.");
       LOG.info("All File still have a valid replica");
 
       //
@@ -287,7 +285,7 @@
 
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
       int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
-      assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
+      assertEquals(replFactor, blockFilesCorrupted, "All replicas not corrupted");
 
       try {
         IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index c61c0b1..54920f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -27,8 +27,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -37,7 +37,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 public class TestDFSAddressConfig {
@@ -67,7 +67,7 @@
      *------------------------------------------------------------------------*/
     for (int i = 0; i < dns.size(); i++) {
       DataNodeProperties dnp = cluster.stopDataNode(i);
-      assertNotNull("Should have been able to stop simulated datanode", dnp);
+      assertNotNull(dnp, "Should have been able to stop simulated datanode");
     }
 
     conf.unset(DFS_DATANODE_ADDRESS_KEY);
@@ -92,7 +92,7 @@
      *------------------------------------------------------------------------*/
     for (int i = 0; i < dns.size(); i++) {
       DataNodeProperties dnp = cluster.stopDataNode(i);
-      assertNotNull("Should have been able to stop simulated datanode", dnp);
+      assertNotNull(dnp, "Should have been able to stop simulated datanode");
     }
 
     conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
index 59cc154..49d4a91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -29,10 +29,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.util.ThreadUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 
 /**
@@ -44,13 +44,13 @@
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     cluster = null;
     conf = new HdfsConfiguration();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -122,8 +122,8 @@
 
     // Bring back the older DNs, since they are gonna be forgiven only
     // afterwards of this previous block write.
-    Assert.assertEquals(true, cluster.restartDataNode(one, true));
-    Assert.assertEquals(true, cluster.restartDataNode(two, true));
+    Assertions.assertEquals(true, cluster.restartDataNode(one, true));
+    Assertions.assertEquals(true, cluster.restartDataNode(two, true));
     cluster.waitActive();
 
     // Sleep for 5s, to let the excluded nodes be expired
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index f65bc3a..746714a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.lang.reflect.Field;
@@ -34,6 +31,7 @@
 
 import javax.net.SocketFactory;
 
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -55,10 +53,9 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
@@ -74,7 +71,7 @@
   private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
   
-  @Before
+  @BeforeEach
   public void setUpCluster() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleHATopology())
@@ -83,7 +80,7 @@
     cluster.waitActive();
   }
   
-  @After
+  @AfterEach
   public void tearDownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -91,7 +88,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void clearConfig() {
     SecurityUtil.setTokenServiceUseIp(true);
   }
@@ -217,9 +214,9 @@
       fail("Successfully got proxy provider for misconfigured FS");
     } catch (IOException ioe) {
       LOG.info("got expected exception", ioe);
-      assertTrue("expected exception did not contain helpful message",
-          StringUtils.stringifyException(ioe).contains(
-          "Could not find any configured addresses for URI " + uri));
+      assertTrue(StringUtils.stringifyException(ioe).contains(
+          "Could not find any configured addresses for URI " + uri),
+          "expected exception did not contain helpful message");
     }
   }
 
@@ -233,7 +230,7 @@
     try {
       Field f = InetAddress.class.getDeclaredField("nameServices");
       f.setAccessible(true);
-      Assume.assumeNotNull(f);
+      Assertions.assertNotNull(f);
       @SuppressWarnings("unchecked")
       List<NameService> nsList = (List<NameService>) f.get(null);
 
@@ -248,7 +245,7 @@
       LOG.info("Unable to spy on DNS. Skipping test.", t);
       // In case the JDK we're testing on doesn't work like Sun's, just
       // skip the test.
-      Assume.assumeNoException(t);
+      // Assume.assumeNoException(t);  // TODO: no JUnit 5 equivalent; use Assumptions or confirm the rethrow below is acceptable.
       throw new RuntimeException(t);
     }
   }
@@ -377,9 +374,9 @@
     // not to use IP address for token service
     SecurityUtil.setTokenServiceUseIp(false);
 
-    // Logical URI should be used.
-    assertTrue("Legacy proxy providers should use logical URI.",
-        HAUtil.useLogicalUri(config, p.toUri()));
+    // Logical URI should be used.
+    assertTrue(HAUtil.useLogicalUri(config, p.toUri()),
+        "Legacy proxy providers should use logical URI.");
   }
 
   /**
@@ -394,8 +391,8 @@
         nnUri.getHost(),
         IPFailoverProxyProvider.class.getName());
 
-    assertFalse("IPFailoverProxyProvider should not use logical URI.",
-        HAUtil.useLogicalUri(config, nnUri));
+    assertFalse(HAUtil.useLogicalUri(config, nnUri),
+        "IPFailoverProxyProvider should not use logical URI.");
   }
 
 }
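The TODO left in TestDFSClientFailover marks the one place where org.junit.Assume had no mechanical rewrite: Jupiter's equivalent lives in org.junit.jupiter.api.Assumptions, and a failed assumption aborts (skips) the test rather than failing it. A sketch of what the assumption-based form could look like, with illustrative names, not part of this patch:

    import org.junit.jupiter.api.Assumptions;
    import org.junit.jupiter.api.Test;

    public class AssumptionExampleTest {
      @Test
      public void skipsWhenEnvironmentCannotBePrepared() {
        Throwable problem = probeEnvironment();
        // JUnit 4: Assume.assumeNoException(problem);
        // JUnit 5: a failed assumption marks the test as aborted, not failed.
        Assumptions.assumeTrue(problem == null,
            () -> "Unable to spy on DNS. Skipping test: " + problem);
      }

      private Throwable probeEnvironment() {
        return null; // placeholder for the real reflective setup
      }
    }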
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 970003b..229cf2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -90,9 +87,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
@@ -160,7 +157,7 @@
     }
   }
   
-  @Before
+  @BeforeEach
   public void setupConf(){
     conf = new HdfsConfiguration();
   }
@@ -285,8 +282,8 @@
     try {
       os.close();
     } catch (Exception e) {
-      assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
-           e.getMessage().equals(exceptionMsg));
+      assertTrue(e.getMessage().equals(exceptionMsg),
+          "Retries are not being stopped correctly: " + e.getMessage());
     }
   }
 
@@ -632,7 +629,7 @@
     timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
     timestamp2 = Time.now();
-    assertTrue("Something wrong! Test 2 got Exception with maxmum retries!", pass);
+    assertTrue(pass, "Something wrong! Test 2 got Exception with maximum retries!");
     LOG.info("Test 2 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
     
     //
@@ -657,7 +654,7 @@
     timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
     timestamp2 = Time.now();
-    assertTrue("Something wrong! Test 4 got Exception with maxmum retries!", pass);
+    assertTrue(pass, "Something wrong! Test 4 got Exception with maximum retries!");
     LOG.info("Test 4 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
   }
 
@@ -692,10 +689,10 @@
                                          bufferSize,
                                          replicationFactor,
                                          blockSize);
-      
-      // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
-                  fs.getFileStatus(file1).isFile());
+
+      // verify that file exists in FS namespace
+      assertTrue(fs.getFileStatus(file1).isFile(),
+          file1 + " should be a file");
       System.out.println("Path : \"" + file1 + "\"");
       LOG.info("Path : \"" + file1 + "\"");
 
@@ -706,10 +703,10 @@
 
       // verify that file size has changed to the full size
       long len = fs.getFileStatus(file1).getLen();
-      
-      assertTrue(file1 + " should be of size " + fileLen +
-                 " but found to be of size " + len, 
-                  len == fileLen);
+
+      assertTrue(len == fileLen,
+          file1 + " should be of size " + fileLen +
+          " but found to be of size " + len);
       
       // read back and check data integrigy
       byte[] read_buf = new byte[fileLen];
@@ -809,11 +806,11 @@
         in.close();
         fs.close();
 
-        assertTrue("hashed keys are not the same size",
-                   hash_sha.length == expected_sha.length);
+        assertTrue(hash_sha.length == expected_sha.length,
+            "hashed keys are not the same size");
 
-        assertTrue("hashed keys are not equal",
-                   Arrays.equals(hash_sha, expected_sha));
+        assertTrue(Arrays.equals(hash_sha, expected_sha),
+            "hashed keys are not equal");
         
         counter.inc(); // count this thread as successful
         
@@ -928,8 +925,8 @@
 
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
       int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
-      assertEquals("All replicas not corrupted", REPL_FACTOR,
-          blockFilesCorrupted);
+      assertEquals(REPL_FACTOR, blockFilesCorrupted,
+          "All replicas not corrupted");
 
       InetSocketAddress nnAddr =
         new InetSocketAddress("localhost", cluster.getNameNodePort());
@@ -1107,13 +1104,13 @@
         final FSDataInputStream in = fs.open(file4);
         int count = 0;
         for(int r; (r = in.read()) != -1; count++) {
-          Assert.assertEquals(String.format("count=%d", count),
-              bytes[count % bytes.length], (byte)r);
+          Assertions.assertEquals(bytes[count % bytes.length], (byte) r,
+              String.format("count=%d", count));
         }
         if (!isWebHDFS) {
-          Assert.assertEquals(5 * bytes.length, count);
+          Assertions.assertEquals(5 * bytes.length, count);
         } else {
-          Assert.assertEquals(2 * bytes.length, count);
+          Assertions.assertEquals(2 * bytes.length, count);
         }
         in.close();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
index 1e6f03a..e5b7ac6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
@@ -20,9 +20,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -31,7 +29,7 @@
 import java.net.Socket;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestDFSClientSocketSize {
   private static final Logger LOG = LoggerFactory.getLogger(
@@ -49,8 +47,8 @@
     final int sendBufferSize = getSendBufferSize(new Configuration());
     LOG.info("If not specified, the auto tuned send buffer size is: {}",
         sendBufferSize);
-    assertTrue("Send buffer size should be non-negative value which is " +
-        "determined by system (kernel).", sendBufferSize > 0);
+    assertTrue(sendBufferSize > 0, "Send buffer size should be non-negative " +
+        "value which is determined by system (kernel).");
   }
 
   /**
@@ -69,8 +67,8 @@
 
     LOG.info("Large buf size is {}, small is {}",
         sendBufferSize1, sendBufferSize2);
-    assertTrue("Larger specified send buffer should have effect",
-        sendBufferSize1 > sendBufferSize2);
+    assertTrue(sendBufferSize1 > sendBufferSize2,
+        "Larger specified send buffer should have effect");
   }
 
   /**
@@ -83,8 +81,8 @@
     conf.setInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, 0);
     final int sendBufferSize = getSendBufferSize(conf);
     LOG.info("The auto tuned send buffer size is: {}", sendBufferSize);
-    assertTrue("Send buffer size should be non-negative value which is " +
-        "determined by system (kernel).", sendBufferSize > 0);
+    assertTrue(sendBufferSize > 0, "Send buffer size should be non-negative " +
+        "value which is determined by system (kernel).");
   }
 
   private int getSendBufferSize(Configuration conf) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
index 01210d2..b15faab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.File;
 import java.util.Collections;
@@ -33,8 +33,8 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test ensures the appropriate response from the system when 
@@ -184,7 +184,7 @@
     } // end numDir loop
   }
  
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index 05d3c63..d84048a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -35,8 +35,8 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.util.ExitUtil;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -60,8 +60,8 @@
   }
 
   private static long checkTxid(EventBatch batch, long prevTxid){
-    Assert.assertTrue("Previous txid " + prevTxid + " was not less than " +
-        "new txid " + batch.getTxid(), prevTxid < batch.getTxid());
+    Assertions.assertTrue(prevTxid < batch.getTxid(), "Previous txid " + prevTxid +
+        " was not less than new txid " + batch.getTxid());
     return batch.getTxid();
   }
 
@@ -73,7 +73,7 @@
    */
   @Test
   public void testOpcodeCount() {
-    Assert.assertEquals(54, FSEditLogOpCodes.values().length);
+    Assertions.assertEquals(54, FSEditLogOpCodes.values().length);
   }
 
 
@@ -146,287 +146,287 @@
 
       // RenameOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       long txid = batch.getTxid();
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
       Event.RenameEvent re = (Event.RenameEvent) batch.getEvents()[0];
-      Assert.assertEquals("/file4", re.getDstPath());
-      Assert.assertEquals("/file", re.getSrcPath());
-      Assert.assertTrue(re.getTimestamp() > 0);
+      Assertions.assertEquals("/file4", re.getDstPath());
+      Assertions.assertEquals("/file", re.getSrcPath());
+      Assertions.assertTrue(re.getTimestamp() > 0);
       LOG.info(re.toString());
-      Assert.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
+      Assertions.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
 
       long eventsBehind = eis.getTxidsBehindEstimate();
 
       // RenameOldOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
       Event.RenameEvent re2 = (Event.RenameEvent) batch.getEvents()[0];
-      Assert.assertTrue(re2.getDstPath().equals("/file2"));
-      Assert.assertTrue(re2.getSrcPath().equals("/file4"));
-      Assert.assertTrue(re2.getTimestamp() > 0);
+      Assertions.assertTrue(re2.getDstPath().equals("/file2"));
+      Assertions.assertTrue(re2.getSrcPath().equals("/file4"));
+      Assertions.assertTrue(re2.getTimestamp() > 0);
       LOG.info(re2.toString());
 
       // AddOp with overwrite
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
       Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
-      Assert.assertTrue(ce.getPath().equals("/file2"));
-      Assert.assertTrue(ce.getCtime() > 0);
-      Assert.assertTrue(ce.getReplication() > 0);
-      Assert.assertTrue(ce.getSymlinkTarget() == null);
-      Assert.assertTrue(ce.getOverwrite());
-      Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
-      Assert.assertTrue(ce.isErasureCoded().isPresent());
-      Assert.assertFalse(ce.isErasureCoded().get());
+      Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+      Assertions.assertTrue(ce.getPath().equals("/file2"));
+      Assertions.assertTrue(ce.getCtime() > 0);
+      Assertions.assertTrue(ce.getReplication() > 0);
+      Assertions.assertTrue(ce.getSymlinkTarget() == null);
+      Assertions.assertTrue(ce.getOverwrite());
+      Assertions.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
+      Assertions.assertTrue(ce.isErasureCoded().isPresent());
+      Assertions.assertFalse(ce.isErasureCoded().get());
       LOG.info(ce.toString());
-      Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+      Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
 
       // CloseOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
       Event.CloseEvent ce2 = (Event.CloseEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce2.getPath().equals("/file2"));
-      Assert.assertTrue(ce2.getFileSize() > 0);
-      Assert.assertTrue(ce2.getTimestamp() > 0);
+      Assertions.assertTrue(ce2.getPath().equals("/file2"));
+      Assertions.assertTrue(ce2.getFileSize() > 0);
+      Assertions.assertTrue(ce2.getTimestamp() > 0);
       LOG.info(ce2.toString());
-      Assert.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
+      Assertions.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
 
       // AppendOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
       Event.AppendEvent append2 = (Event.AppendEvent)batch.getEvents()[0];
-      Assert.assertEquals("/file2", append2.getPath());
-      Assert.assertFalse(append2.toNewBlock());
+      Assertions.assertEquals("/file2", append2.getPath());
+      Assertions.assertFalse(append2.toNewBlock());
       LOG.info(append2.toString());
-      Assert.assertTrue(append2.toString().startsWith("AppendEvent [path="));
+      Assertions.assertTrue(append2.toString().startsWith("AppendEvent [path="));
 
       // CloseOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
-      Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
+      Assertions.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
 
       // TimesOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue.getPath().equals("/file2"));
-      Assert.assertTrue(mue.getMetadataType() ==
+      Assertions.assertTrue(mue.getPath().equals("/file2"));
+      Assertions.assertTrue(mue.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.TIMES);
       LOG.info(mue.toString());
-      Assert.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
+      Assertions.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
 
       // SetReplicationOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue2.getPath().equals("/file2"));
-      Assert.assertTrue(mue2.getMetadataType() ==
+      Assertions.assertTrue(mue2.getPath().equals("/file2"));
+      Assertions.assertTrue(mue2.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.REPLICATION);
-      Assert.assertTrue(mue2.getReplication() == 1);
+      Assertions.assertTrue(mue2.getReplication() == 1);
       LOG.info(mue2.toString());
 
       // ConcatDeleteOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(3, batch.getEvents().length);
+      Assertions.assertEquals(3, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
-      Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
-      Assert.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
+      Assertions.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+      Assertions.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
       Event.UnlinkEvent ue2 = (Event.UnlinkEvent) batch.getEvents()[1];
-      Assert.assertTrue(ue2.getPath().equals("/file3"));
-      Assert.assertTrue(ue2.getTimestamp() > 0);
+      Assertions.assertTrue(ue2.getPath().equals("/file3"));
+      Assertions.assertTrue(ue2.getTimestamp() > 0);
       LOG.info(ue2.toString());
-      Assert.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
-      Assert.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
+      Assertions.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
+      Assertions.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
       Event.CloseEvent ce3 = (Event.CloseEvent) batch.getEvents()[2];
-      Assert.assertTrue(ce3.getPath().equals("/file2"));
-      Assert.assertTrue(ce3.getTimestamp() > 0);
+      Assertions.assertTrue(ce3.getPath().equals("/file2"));
+      Assertions.assertTrue(ce3.getTimestamp() > 0);
 
       // DeleteOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
       Event.UnlinkEvent ue = (Event.UnlinkEvent) batch.getEvents()[0];
-      Assert.assertTrue(ue.getPath().equals("/file2"));
-      Assert.assertTrue(ue.getTimestamp() > 0);
+      Assertions.assertTrue(ue.getPath().equals("/file2"));
+      Assertions.assertTrue(ue.getTimestamp() > 0);
       LOG.info(ue.toString());
 
       // MkdirOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
       Event.CreateEvent ce4 = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce4.getiNodeType() ==
+      Assertions.assertTrue(ce4.getiNodeType() ==
           Event.CreateEvent.INodeType.DIRECTORY);
-      Assert.assertTrue(ce4.getPath().equals("/dir"));
-      Assert.assertTrue(ce4.getCtime() > 0);
-      Assert.assertTrue(ce4.getReplication() == 0);
-      Assert.assertTrue(ce4.getSymlinkTarget() == null);
+      Assertions.assertTrue(ce4.getPath().equals("/dir"));
+      Assertions.assertTrue(ce4.getCtime() > 0);
+      Assertions.assertTrue(ce4.getReplication() == 0);
+      Assertions.assertTrue(ce4.getSymlinkTarget() == null);
       LOG.info(ce4.toString());
 
       // SetPermissionsOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue3.getPath().equals("/dir"));
-      Assert.assertTrue(mue3.getMetadataType() ==
+      Assertions.assertTrue(mue3.getPath().equals("/dir"));
+      Assertions.assertTrue(mue3.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.PERMS);
-      Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
+      Assertions.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
       LOG.info(mue3.toString());
 
       // SetOwnerOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue4.getPath().equals("/dir"));
-      Assert.assertTrue(mue4.getMetadataType() ==
+      Assertions.assertTrue(mue4.getPath().equals("/dir"));
+      Assertions.assertTrue(mue4.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.OWNER);
-      Assert.assertTrue(mue4.getOwnerName().equals("username"));
-      Assert.assertTrue(mue4.getGroupName().equals("groupname"));
+      Assertions.assertTrue(mue4.getOwnerName().equals("username"));
+      Assertions.assertTrue(mue4.getGroupName().equals("groupname"));
       LOG.info(mue4.toString());
 
       // SymlinkOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
       Event.CreateEvent ce5 = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce5.getiNodeType() ==
+      Assertions.assertTrue(ce5.getiNodeType() ==
           Event.CreateEvent.INodeType.SYMLINK);
-      Assert.assertTrue(ce5.getPath().equals("/dir2"));
-      Assert.assertTrue(ce5.getCtime() > 0);
-      Assert.assertTrue(ce5.getReplication() == 0);
-      Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
+      Assertions.assertTrue(ce5.getPath().equals("/dir2"));
+      Assertions.assertTrue(ce5.getCtime() > 0);
+      Assertions.assertTrue(ce5.getReplication() == 0);
+      Assertions.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
       LOG.info(ce5.toString());
 
       // SetXAttrOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue5.getPath().equals("/file5"));
-      Assert.assertTrue(mue5.getMetadataType() ==
+      Assertions.assertTrue(mue5.getPath().equals("/file5"));
+      Assertions.assertTrue(mue5.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.XATTRS);
-      Assert.assertTrue(mue5.getxAttrs().size() == 1);
-      Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
-      Assert.assertTrue(!mue5.isxAttrsRemoved());
+      Assertions.assertTrue(mue5.getxAttrs().size() == 1);
+      Assertions.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
+      Assertions.assertTrue(!mue5.isxAttrsRemoved());
       LOG.info(mue5.toString());
 
       // RemoveXAttrOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue6.getPath().equals("/file5"));
-      Assert.assertTrue(mue6.getMetadataType() ==
+      Assertions.assertTrue(mue6.getPath().equals("/file5"));
+      Assertions.assertTrue(mue6.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.XATTRS);
-      Assert.assertTrue(mue6.getxAttrs().size() == 1);
-      Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
-      Assert.assertTrue(mue6.isxAttrsRemoved());
+      Assertions.assertTrue(mue6.getxAttrs().size() == 1);
+      Assertions.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
+      Assertions.assertTrue(mue6.isxAttrsRemoved());
       LOG.info(mue6.toString());
 
       // SetAclOp (1)
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue7.getPath().equals("/file5"));
-      Assert.assertTrue(mue7.getMetadataType() ==
+      Assertions.assertTrue(mue7.getPath().equals("/file5"));
+      Assertions.assertTrue(mue7.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.ACLS);
-      Assert.assertTrue(mue7.getAcls().contains(
+      Assertions.assertTrue(mue7.getAcls().contains(
           AclEntry.parseAclEntry("user::rwx", true)));
       LOG.info(mue7.toString());
 
       // SetAclOp (2)
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue8.getPath().equals("/file5"));
-      Assert.assertTrue(mue8.getMetadataType() ==
+      Assertions.assertTrue(mue8.getPath().equals("/file5"));
+      Assertions.assertTrue(mue8.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.ACLS);
-      Assert.assertTrue(mue8.getAcls() == null);
+      Assertions.assertTrue(mue8.getAcls() == null);
       LOG.info(mue8.toString());
 
       // RenameOp (2)
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
       Event.RenameEvent re3 = (Event.RenameEvent) batch.getEvents()[0];
-      Assert.assertTrue(re3.getDstPath().equals("/dir/file5"));
-      Assert.assertTrue(re3.getSrcPath().equals("/file5"));
-      Assert.assertTrue(re3.getTimestamp() > 0);
+      Assertions.assertTrue(re3.getDstPath().equals("/dir/file5"));
+      Assertions.assertTrue(re3.getSrcPath().equals("/file5"));
+      Assertions.assertTrue(re3.getTimestamp() > 0);
       LOG.info(re3.toString());
 
       // TruncateOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert
+      Assertions
           .assertTrue(batch.getEvents()[0].getEventType() ==
           Event.EventType.TRUNCATE);
       Event.TruncateEvent et = ((Event.TruncateEvent) batch.getEvents()[0]);
-      Assert.assertTrue(et.getPath().equals("/truncate_file"));
-      Assert.assertTrue(et.getFileSize() == BLOCK_SIZE);
-      Assert.assertTrue(et.getTimestamp() > 0);
+      Assertions.assertTrue(et.getPath().equals("/truncate_file"));
+      Assertions.assertTrue(et.getFileSize() == BLOCK_SIZE);
+      Assertions.assertTrue(et.getTimestamp() > 0);
       LOG.info(et.toString());
-      Assert.assertTrue(et.toString().startsWith("TruncateEvent [path="));
+      Assertions.assertTrue(et.toString().startsWith("TruncateEvent [path="));
 
       // CreateEvent without overwrite
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType()
+      Assertions.assertTrue(batch.getEvents()[0].getEventType()
               == Event.EventType.CREATE);
       ce = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
-      Assert.assertTrue(ce.getPath().equals("/file_ec_test1"));
-      Assert.assertTrue(ce.getCtime() > 0);
-      Assert.assertTrue(ce.getReplication() > 0);
-      Assert.assertTrue(ce.getSymlinkTarget() == null);
-      Assert.assertFalse(ce.getOverwrite());
-      Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
-      Assert.assertTrue(ce.isErasureCoded().isPresent());
-      Assert.assertFalse(ce.isErasureCoded().get());
+      Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+      Assertions.assertTrue(ce.getPath().equals("/file_ec_test1"));
+      Assertions.assertTrue(ce.getCtime() > 0);
+      Assertions.assertTrue(ce.getReplication() > 0);
+      Assertions.assertTrue(ce.getSymlinkTarget() == null);
+      Assertions.assertFalse(ce.getOverwrite());
+      Assertions.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
+      Assertions.assertTrue(ce.isErasureCoded().isPresent());
+      Assertions.assertFalse(ce.isErasureCoded().get());
       LOG.info(ce.toString());
-      Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+      Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
 
       // Returns null when there are no further events
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
 
       // make sure the estimate hasn't changed since the above assertion
       // tells us that we are fully caught up to the current namesystem state
       // and we should not have been behind at all when eventsBehind was set
       // either, since there were few enough events that they should have all
       // been read to the client during the first poll() call
-      Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
+      Assertions.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
 
     } finally {
       cluster.shutdown();
@@ -470,41 +470,41 @@
       EventBatch batch = null;
 
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       long txid = batch.getTxid();
       long eventsBehind = eis.getTxidsBehindEstimate();
-      Assert.assertTrue(batch.getEvents()[0].getEventType()
+      Assertions.assertTrue(batch.getEvents()[0].getEventType()
               == Event.EventType.CREATE);
       Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
-      Assert.assertTrue(ce.getPath().equals("/ecdir/file_ec_test2"));
-      Assert.assertTrue(ce.getCtime() > 0);
-      Assert.assertEquals(1, ce.getReplication());
-      Assert.assertTrue(ce.getSymlinkTarget() == null);
-      Assert.assertTrue(ce.getOverwrite());
-      Assert.assertEquals(ecPolicy.getCellSize(), ce.getDefaultBlockSize());
-      Assert.assertTrue(ce.isErasureCoded().isPresent());
-      Assert.assertTrue(ce.isErasureCoded().get());
+      Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+      Assertions.assertTrue(ce.getPath().equals("/ecdir/file_ec_test2"));
+      Assertions.assertTrue(ce.getCtime() > 0);
+      Assertions.assertEquals(1, ce.getReplication());
+      Assertions.assertTrue(ce.getSymlinkTarget() == null);
+      Assertions.assertTrue(ce.getOverwrite());
+      Assertions.assertEquals(ecPolicy.getCellSize(), ce.getDefaultBlockSize());
+      Assertions.assertTrue(ce.isErasureCoded().isPresent());
+      Assertions.assertTrue(ce.isErasureCoded().get());
       LOG.info(ce.toString());
-      Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+      Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
 
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType()
+      Assertions.assertTrue(batch.getEvents()[0].getEventType()
               == Event.EventType.CLOSE);
-      Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath()
+      Assertions.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath()
               .equals("/ecdir/file_ec_test2"));
 
       // Returns null when there are no further events
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
 
       // make sure the estimate hasn't changed since the above assertion
       // tells us that we are fully caught up to the current namesystem state
       // and we should not have been behind at all when eventsBehind was set
       // either, since there were few enough events that they should have all
       // been read to the client during the first poll() call
-      Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
+      Assertions.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
     } finally {
       cluster.shutdown();
     }
@@ -532,12 +532,12 @@
       // active
       for (int i = 0; i < 10; i++) {
         batch = waitForNextEvents(eis);
-        Assert.assertEquals(1, batch.getEvents().length);
-        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
-        Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
+        Assertions.assertEquals(1, batch.getEvents().length);
+        Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+        Assertions.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
             i));
       }
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
     } finally {
       cluster.shutdown();
     }
@@ -571,12 +571,12 @@
       EventBatch batch = null;
       for (int i = 0; i < 10; i++) {
         batch = waitForNextEvents(eis);
-        Assert.assertEquals(1, batch.getEvents().length);
-        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
-        Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
+        Assertions.assertEquals(1, batch.getEvents().length);
+        Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+        Assertions.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
             i));
       }
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
     } finally {
       try {
         cluster.shutdown();
@@ -615,10 +615,10 @@
       // a very generous wait period -- the edit will definitely have been
       // processed by the time this is up
       EventBatch batch = eis.poll(5, TimeUnit.SECONDS);
-      Assert.assertNotNull(batch);
-      Assert.assertEquals(1, batch.getEvents().length);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
-      Assert.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
+      Assertions.assertNotNull(batch);
+      Assertions.assertEquals(1, batch.getEvents().length);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
index c5537b5..90f31ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
@@ -33,10 +33,10 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,10 +64,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Class for Kerberized test cases for {@link DFSInotifyEventInputStream}.
@@ -131,7 +128,7 @@
           while ((batch = eis.poll()) != null) {
             LOG.info("txid: " + batch.getTxid());
           }
-          assertNull("poll should not return anything", eis.poll());
+          assertNull(eis.poll(), "poll should not return anything");
 
           Thread.sleep(6000);
           LOG.info("Slept 6 seconds to make sure the TGT has expired.");
@@ -143,16 +140,16 @@
 
           // verify we can poll after a tgt expiration interval
           batch = eis.poll();
-          assertNotNull("poll should return something", batch);
+          assertNotNull(batch, "poll should return something");
           assertEquals(1, batch.getEvents().length);
-          assertNull("poll should not return anything", eis.poll());
+          assertNull(eis.poll(), "poll should not return anything");
           return null;
         }
       }
     });
   }
 
-  @Before
+  @BeforeEach
   public void initKerberizedCluster() throws Exception {
     baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
         TestDFSInotifyEventInputStreamKerberized.class.getSimpleName());
@@ -169,8 +166,8 @@
     SecurityUtil.setAuthenticationMethod(
         UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     final String userName = "hdfs";
     nnKeytabFile = new File(baseDir, userName + ".keytab");
@@ -218,7 +215,7 @@
         KeyStoreTestUtil.getServerSSLConfigFileName());
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index 2f9e0d3..fa263b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -18,11 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_READ_USE_CACHE_PRIORITY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -47,8 +43,8 @@
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
 
-import org.junit.Assume;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 public class TestDFSInputStream {
   private void testSkipInner(MiniDFSCluster cluster) throws IOException {
@@ -108,7 +104,7 @@
 
   @Test(timeout=60000)
   public void testSkipWithLocalBlockReader() throws IOException {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assertions.assertNull(DomainSocket.getLoadingFailureReason());
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
     Configuration conf = new Configuration();
@@ -218,10 +214,10 @@
       final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       cluster.getNameNode().getNamesystem().getBlockManager()
           .getDatanodeManager().fetchDatanodes(live, null, false);
-      assertTrue("DN start should be success and live dn should be 2",
-          live.size() == 2);
-      assertTrue("File size should be " + chunkSize,
-          fs.getFileStatus(file).getLen() == chunkSize);
+      assertTrue(live.size() == 2,
+          "DN start should be success and live dn should be 2");
+      assertTrue(fs.getFileStatus(file).getLen() == chunkSize,
+          "File size should be " + chunkSize);
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
index 9fed914..acbbc17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
@@ -19,11 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -41,10 +37,10 @@
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -84,7 +80,7 @@
     enableBlkExpiration = enableExpiration;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf.setBoolean(
@@ -119,7 +115,7 @@
     fs = dfsCluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (dfsClient != null) {
       dfsClient.close();
@@ -172,21 +168,21 @@
       DatanodeInfo[] firstBlkDNInfos = firstLocatedBlk.getLocations();
       while (fin.getPos() < firstBlockMark) {
         bytesRead = fin.read(readBuffer);
-        Assert.assertTrue("Unexpected number of read bytes",
-            chunkReadSize >= bytesRead);
+        Assertions.assertTrue(chunkReadSize >= bytesRead,
+            "Unexpected number of read bytes");
         if (currDNInfo == null) {
           currDNInfo = fin.getCurrentDatanode();
-          assertNotNull("current FIS datanode is null", currDNInfo);
+          assertNotNull(currDNInfo, "current FIS datanode is null");
           continue;
         }
         prevDNInfo = currDNInfo;
         currDNInfo = fin.getCurrentDatanode();
-        assertEquals("the DFSInput stream does not read from same node",
-            prevDNInfo, currDNInfo);
+        assertEquals(prevDNInfo, currDNInfo,
+            "the DFSInput stream does not read from same node");
       }
 
-      assertEquals("InputStream exceeds expected position",
-          firstBlockMark, fin.getPos());
+      assertEquals(firstBlockMark, fin.getPos(),
+          "InputStream exceeds expected position");
       // get the second block locations
       LocatedBlock secondLocatedBlk =
           fin.locatedBlocks.getLocatedBlocks().get(1);
@@ -216,23 +212,23 @@
       }
       while (fin.getPos() < secondBlockMark) {
         bytesRead = fin.read(readBuffer);
-        assertTrue("dead node used to read at position: " + fin.getPos(),
-            fin.deadNodesContain(deadNodeInfo));
-        Assert.assertTrue("Unexpected number of read bytes",
-            chunkReadSize >= bytesRead);
+        assertTrue(fin.deadNodesContain(deadNodeInfo),
+            "dead node used to read at position: " + fin.getPos());
+        Assertions.assertTrue(chunkReadSize >= bytesRead,
+            "Unexpected number of read bytes");
         prevDNInfo = currDNInfo;
         currDNInfo = fin.getCurrentDatanode();
         assertNotEquals(deadNodeInfo, currDNInfo);
         if (firstIteration) {
-          // currDNInfo has to be different unless first block locs is different
-          assertFalse("FSInputStream should pick a different DN",
-              firstBlkDNInfos[0].equals(deadNodeInfo)
-                  && prevDNInfo.equals(currDNInfo));
+          // currDNInfo has to be different unless first block locs is different
+          assertFalse(firstBlkDNInfos[0].equals(deadNodeInfo)
+                  && prevDNInfo.equals(currDNInfo),
+              "FSInputStream should pick a different DN");
           firstIteration = false;
         }
       }
-      assertEquals("InputStream exceeds expected position",
-          secondBlockMark, fin.getPos());
+      assertEquals(secondBlockMark, fin.getPos(),
+          "InputStream exceeds expected position");
       // restart the dead node with the same port
       assertTrue(dfsCluster.restartDataNode(stoppedDNProps, true));
       dfsCluster.waitActive();
@@ -244,13 +240,13 @@
       while (fin.getPos() < thirdBlockMark) {
         bytesRead = fin.read(readBuffer);
         if (this.enableBlkExpiration) {
-          assertEquals("node is removed from deadNodes after 1st iteration",
-              firstIteration, fin.deadNodesContain(deadNodeInfo));
+          assertEquals(firstIteration, fin.deadNodesContain(deadNodeInfo),
+              "node is removed from deadNodes after 1st iteration");
         } else {
           assertTrue(fin.deadNodesContain(deadNodeInfo));
         }
-        Assert.assertTrue("Unexpected number of read bytes",
-            chunkReadSize >= bytesRead);
+        Assertions.assertTrue(chunkReadSize >= bytesRead,
+            "Unexpected number of read bytes");
         prevDNInfo = currDNInfo;
         currDNInfo = fin.getCurrentDatanode();
         if (!this.enableBlkExpiration) {
@@ -266,8 +262,8 @@
           }
         }
       }
-      assertEquals("InputStream exceeds expected position",
-          thirdBlockMark, fin.getPos());
+      assertEquals(thirdBlockMark, fin.getPos(),
+          "InputStream exceeds expected position");
     } finally {
       if (fout != null) {
         fout.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
index e19f3281..d0cd130 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -30,7 +30,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.util.Time;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests that the DFS command mkdirs only creates valid
@@ -106,10 +106,10 @@
       } catch (IOException e) {
         expectedException = e;
       }
-      assertTrue("Create a directory when parent dir exists as file using"
-          + " mkdir() should throw ParentNotDirectoryException ",
-          expectedException != null
-              && expectedException instanceof ParentNotDirectoryException);
+      assertTrue(expectedException != null
+              && expectedException instanceof ParentNotDirectoryException,
+          "Create a directory when parent dir exists as file using"
+              + " mkdir() should throw ParentNotDirectoryException ");
       // Create a dir in a non-exist directory, should fail
       expectedException = null;
       try {
@@ -118,10 +118,10 @@
       } catch (IOException e) {
         expectedException = e;
       }
-      assertTrue("Create a directory in a non-exist parent dir using"
-          + " mkdir() should throw FileNotFoundException ",
-          expectedException != null
-              && expectedException instanceof FileNotFoundException);
+      assertTrue(expectedException != null
+              && expectedException instanceof FileNotFoundException,
+          "Create a directory in a non-exist parent dir using"
+              + " mkdir() should throw FileNotFoundException ");
     } finally {
       dfs.close();
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index 432ac8e..6867faa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -57,22 +57,19 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
 import org.mockito.Mockito;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
-import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.doThrow;
@@ -85,7 +82,7 @@
 public class TestDFSOutputStream {
   static MiniDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -107,7 +104,7 @@
     LastExceptionInStreamer ex = (LastExceptionInStreamer) Whitebox
         .getInternalState(streamer, "lastException");
     Throwable thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
-    Assert.assertNull(thrown);
+    Assertions.assertNull(thrown);
 
     dos.close();
 
@@ -119,7 +116,7 @@
       assertEquals(e, dummy);
     }
     thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
-    Assert.assertNull(thrown);
+    Assertions.assertNull(thrown);
     dos.close();
   }
 
@@ -145,10 +142,10 @@
     Field field = dos.getClass().getDeclaredField("packetSize");
     field.setAccessible(true);
 
-    Assert.assertTrue((Integer) field.get(dos) + 33 < packetSize);
+    Assertions.assertTrue((Integer) field.get(dos) + 33 < packetSize);
     // If PKT_MAX_HEADER_LEN is 257, actual packet size come to over 64KB
     // without a fix on HDFS-7308.
-    Assert.assertTrue((Integer) field.get(dos) + 257 < packetSize);
+    Assertions.assertTrue((Integer) field.get(dos) + 257 < packetSize);
   }
 
   /**
@@ -246,21 +243,21 @@
       final Field writePacketSizeField = dos.getClass()
           .getDeclaredField("writePacketSize");
       writePacketSizeField.setAccessible(true);
-      Assert.assertEquals(writePacketSizeField.getInt(dos),
+      Assertions.assertEquals(writePacketSizeField.getInt(dos),
           finalWritePacketSize);
 
       /* get and verify chunksPerPacket */
       final Field chunksPerPacketField = dos.getClass()
           .getDeclaredField("chunksPerPacket");
       chunksPerPacketField.setAccessible(true);
-      Assert.assertEquals(chunksPerPacketField.getInt(dos),
+      Assertions.assertEquals(chunksPerPacketField.getInt(dos),
           (finalWritePacketSize - packateMaxHeaderLength) / chunkSize);
 
       /* get and verify packetSize */
       final Field packetSizeField = dos.getClass()
           .getDeclaredField("packetSize");
       packetSizeField.setAccessible(true);
-      Assert.assertEquals(packetSizeField.getInt(dos),
+      Assertions.assertEquals(packetSizeField.getInt(dos),
           chunksPerPacketField.getInt(dos) * chunkSize);
     } finally {
       if (dfsCluster != null) {
@@ -297,7 +294,7 @@
     DFSPacket packet = mock(DFSPacket.class);
     dataQueue.add(packet);
     stream.run();
-    Assert.assertTrue(congestedNodes.isEmpty());
+    Assertions.assertTrue(congestedNodes.isEmpty());
   }
 
   @Test
@@ -359,11 +356,11 @@
   public void testStreamFlush() throws Exception {
     FileSystem fs = cluster.getFileSystem();
     FSDataOutputStream os = fs.create(new Path("/normal-file"));
-    // Verify output stream supports hsync() and hflush().
-    assertTrue("DFSOutputStream should support hflush()!",
-        os.hasCapability(StreamCapability.HFLUSH.getValue()));
-    assertTrue("DFSOutputStream should support hsync()!",
-        os.hasCapability(StreamCapability.HSYNC.getValue()));
+    // Verify output stream supports hsync() and hflush().
+    assertTrue(os.hasCapability(StreamCapability.HFLUSH.getValue()),
+        "DFSOutputStream should support hflush()!");
+    assertTrue(os.hasCapability(StreamCapability.HSYNC.getValue()),
+        "DFSOutputStream should support hsync()!");
     byte[] bytes = new byte[1024];
     InputStream is = new ByteArrayInputStream(bytes);
     IOUtils.copyBytes(is, os, bytes.length);
@@ -422,7 +419,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
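
The lifecycle annotations are renamed the same way in each converted file. A minimal sketch of the JUnit 4 to JUnit 5 mapping this migration relies on (the class and method bodies below are hypothetical, for illustration only):

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    public class ExampleLifecycleTest {
      // JUnit 4 @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static)
      @BeforeAll
      public static void startCluster() { /* e.g. build a MiniDFSCluster */ }

      @AfterAll
      public static void stopCluster() { /* shut it down */ }

      // JUnit 4 @Before/@After become @BeforeEach/@AfterEach
      @BeforeEach
      public void setUp() { }

      @AfterEach
      public void tearDown() { }

      @Test
      public void testSomething() { }
    }
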
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 15ce06b..d813b2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
@@ -45,9 +42,9 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /** Unit tests for permission */
 public class TestDFSPermission {
@@ -115,13 +112,13 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
   }
   
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -185,7 +182,7 @@
     // case 5: test non-existent parent directory
     uMask = DEFAULT_UMASK;
     initFileSystem(uMask);
-    assertFalse("File shouldn't exists", fs.exists(NON_EXISTENT_PATH));
+    assertFalse(fs.exists(NON_EXISTENT_PATH), "File shouldn't exists");
     createAndCheckPermission(op, NON_EXISTENT_PATH, uMask, new FsPermission(
         DEFAULT_PERMISSION), false);
     Path parent = NON_EXISTENT_PATH.getParent();
@@ -324,8 +321,8 @@
         fail("User2 should not be allowed to delete user1's dir.");
       } catch (AccessControlException e) {
         e.printStackTrace();
-        assertTrue("Permission denied messages must carry the username",
-            e.getMessage().contains(USER2_NAME));
+        assertTrue(e.getMessage().contains(USER2_NAME),
+            "Permission denied messages must carry the username");
       }
 
       // ensure the /BSS/user1 still exists
@@ -357,8 +354,8 @@
         // expect the exception is caused by permission denied
         assertTrue(e.getCause() instanceof AccessControlException);
         e.printStackTrace();
-        assertTrue("Permission denied messages must carry the username",
-            e.getCause().getMessage().contains(USER2_NAME));
+        assertTrue(e.getCause().getMessage().contains(USER2_NAME),
+            "Permission denied messages must carry the username");
       }
 
       // ensure /BSS/user1 still exists
@@ -540,11 +537,11 @@
       fs.access(p1, FsAction.WRITE);
       fail("The access call should have failed.");
     } catch (AccessControlException e) {
-      assertTrue("Permission denied messages must carry the username",
-              e.getMessage().contains(USER1_NAME));
-      assertTrue("Permission denied messages must carry the path parent",
-              e.getMessage().contains(
-                  p1.getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(p1.getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
     }
 
     Path badPath = new Path("/bad/bad");
@@ -574,11 +571,11 @@
       fs.access(p2, FsAction.EXECUTE);
       fail("The access call should have failed.");
     } catch (AccessControlException e) {
-      assertTrue("Permission denied messages must carry the username",
-              e.getMessage().contains(USER1_NAME));
-      assertTrue("Permission denied messages must carry the path parent",
-              e.getMessage().contains(
-                  p2.getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(p2.getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
     }
   }
 
@@ -599,11 +596,11 @@
       fs.access(p3, FsAction.READ_WRITE);
       fail("The access call should have failed.");
     } catch (AccessControlException e) {
-      assertTrue("Permission denied messages must carry the username",
-              e.getMessage().contains(USER1_NAME));
-      assertTrue("Permission denied messages must carry the path parent",
-              e.getMessage().contains(
-                  p3.getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(p3.getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
     }
   }
 
@@ -636,11 +633,11 @@
       fs.exists(nfpath);
       fail("The exists call should have failed.");
     } catch (AccessControlException e) {
-      assertTrue("Permission denied messages must carry file path",
-          e.getMessage().contains(fpath.getName()));
-      assertTrue("Permission denied messages must specify existing_file is not "
-              + "a directory, when checked on /existing_file/non_existing_name",
-          e.getMessage().contains("is not a directory"));
+      assertTrue(e.getMessage().contains(fpath.getName()),
+          "Permission denied messages must carry file path");
+      assertTrue(e.getMessage().contains("is not a directory"),
+          "Permission denied messages must specify existing_file is not "
+              + "a directory, when checked on /existing_file/non_existing_name");
     }
 
     rootFs.setPermission(p4, new FsPermission("600"));
@@ -648,13 +645,13 @@
       fs.exists(nfpath);
       fail("The exists call should have failed.");
     } catch (AccessControlException e) {
-      assertFalse("Permission denied messages must not carry full file path,"
-              + "since the user does not have permission on /p4: "
-              + e.getMessage(),
-          e.getMessage().contains(fpath.getName()));
-      assertFalse("Permission denied messages must not specify /p4"
-          + " is not a directory: " + e.getMessage(),
-          e.getMessage().contains("is not a directory"));
+      assertFalse(e.getMessage().contains(fpath.getName()),
+          "Permission denied messages must not carry full file path,"
+              + "since the user does not have permission on /p4: "
+              + e.getMessage());
+      assertFalse(e.getMessage().contains("is not a directory"),
+          "Permission denied messages must not specify /p4"
+              + " is not a directory: " + e.getMessage());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
index 7630dd6..7aef45c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
@@ -16,8 +16,8 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -28,7 +28,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestDFSRemove {
   final Path dir = new Path("/test/remove/");
@@ -80,8 +80,8 @@
         Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
         // all blocks should be gone now.
         long dfsUsedFinal = getTotalDfsUsed(cluster);
-        assertEquals("All blocks should be gone. start=" + dfsUsedStart
-            + " max=" + dfsUsedMax + " final=" + dfsUsedFinal, dfsUsedStart, dfsUsedFinal);
+        assertEquals(dfsUsedStart, dfsUsedFinal, "All blocks should be gone. start="
+            + dfsUsedStart + " max=" + dfsUsedMax + " final=" + dfsUsedFinal);
       }
 
       fs.delete(dir, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index fe2eee2..2de42db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -16,9 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -191,9 +189,9 @@
       dfs.rename(path, new Path("/dir1"),
           new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
       String auditOut = auditLog.getOutput();
-      assertTrue("Rename should have both OVERWRITE and TO_TRASH "
-              + "flags at namenode but had only " + auditOut,
-          auditOut.contains("options=[OVERWRITE, TO_TRASH]"));
+      assertTrue(auditOut.contains("options=[OVERWRITE, TO_TRASH]"),
+          "Rename should have both OVERWRITE and TO_TRASH "
+              + "flags at namenode but had only " + auditOut);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index bcb37e3..2f24671 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -19,7 +19,7 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -38,16 +38,15 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 
 /**
-* This test ensures the appropriate response (successful or failure) from
-* the system when the system is rolled back under various storage state and
-* version conditions.
-*/
+ * This test ensures the appropriate response (successful or failure) from
+ * the system when the system is rolled back under various storage state and
+ * version conditions.
+ */
 public class TestDFSRollback {
  
   private static final Logger LOG = LoggerFactory.getLogger(
@@ -127,8 +126,8 @@
   void startBlockPoolShouldFail(StartupOption operation, String bpid)
       throws IOException {
     cluster.startDataNodes(conf, 1, false, operation, null); // should fail
-    assertFalse("Block pool " + bpid + " should have failed to start", 
-        cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
+    assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid),
+        "Block pool " + bpid + " should have failed to start");
   }
  
   /**
@@ -344,7 +343,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) {
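
org.junit.jupiter.api.Assertions has no assertThat, so files that used org.junit.Assert.assertThat switch to Hamcrest's own entry point, as the next hunk does for TestDFSShell. A minimal sketch under that assumption (the output string below is hypothetical):

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.core.StringContains.containsString;

    // Hamcrest keeps its (actual, matcher) argument order; only the import changes
    assertThat(output, containsString("Found 0 items"));
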
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 0816c3f..6a523ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -41,6 +41,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -67,10 +70,7 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.rules.Timeout;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Assert;
 import org.slf4j.event.Level;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
@@ -81,7 +81,8 @@
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.hamcrest.core.StringContains.containsString;
 
 /**
@@ -106,7 +107,7 @@
   private static MiniDFSCluster miniCluster;
   private static DistributedFileSystem dfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     final Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -124,7 +125,7 @@
     dfs = miniCluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (miniCluster != null) {
       miniCluster.shutdown(true, true);
@@ -321,7 +322,7 @@
         System.err.println("Exception raised from DFSShell.run " +
             e.getLocalizedMessage());
       }
-      assertEquals("Return code should be 0.", 0, val);
+      assertEquals(0, val, "Return code should be 0.");
       returnString = out.toString();
       out.reset();
       assertTrue(returnString.contains("1   2   " + myFile3.toString()));
@@ -641,74 +642,74 @@
       argv[0] = "-cat";
       argv[1] = root.toUri().getPath();
       int ret = ToolRunner.run(new FsShell(), argv);
-      assertEquals(" -cat returned 1 ", 1, ret);
+      assertEquals(1, ret, " -cat returned 1 ");
       String returned = out.toString();
-      assertTrue("cat does not print exceptions ",
-          (returned.lastIndexOf("Exception") == -1));
+      assertTrue((returned.lastIndexOf("Exception") == -1),
+          "cat does not print exceptions ");
       out.reset();
       argv[0] = "-rm";
       argv[1] = root.toString();
       FsShell shell = new FsShell(dfs.getConf());
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" -rm returned 1 ", 1, ret);
+      assertEquals(1, ret, " -rm returned 1 ");
       returned = out.toString();
       out.reset();
-      assertTrue("rm prints reasonable error ",
-          (returned.lastIndexOf("No such file or directory") != -1));
+      assertTrue((returned.lastIndexOf("No such file or directory") != -1),
+          "rm prints reasonable error ");
       argv[0] = "-rmr";
       argv[1] = root.toString();
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" -rmr returned 1", 1, ret);
+      assertEquals(1, ret, " -rmr returned 1");
       returned = out.toString();
-      assertTrue("rmr prints reasonable error ",
-    		  (returned.lastIndexOf("No such file or directory") != -1));
+      assertTrue((returned.lastIndexOf("No such file or directory") != -1),
+          "rmr prints reasonable error ");
       out.reset();
       argv[0] = "-du";
       argv[1] = "/nonexistentfile";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue(" -du prints reasonable error ",
-          (returned.lastIndexOf("No such file or directory") != -1));
+      assertTrue((returned.lastIndexOf("No such file or directory") != -1),
+          " -du prints reasonable error ");
       out.reset();
       argv[0] = "-dus";
       argv[1] = "/nonexistentfile";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue(" -dus prints reasonable error",
-          (returned.lastIndexOf("No such file or directory") != -1));
+      assertTrue((returned.lastIndexOf("No such file or directory") != -1),
+          " -dus prints reasonable error");
       out.reset();
       argv[0] = "-ls";
       argv[1] = "/nonexistenfile";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue(" -ls does not return Found 0 items",
-          (returned.lastIndexOf("Found 0") == -1));
+      assertTrue((returned.lastIndexOf("Found 0") == -1),
+          " -ls does not return Found 0 items");
       out.reset();
       argv[0] = "-ls";
       argv[1] = "/nonexistentfile";
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" -lsr should fail ", 1, ret);
+      assertEquals(1, ret, " -lsr should fail ");
       out.reset();
       dfs.mkdirs(new Path("/testdir"));
       argv[0] = "-ls";
       argv[1] = "/testdir";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue(" -ls does not print out anything ",
-          (returned.lastIndexOf("Found 0") == -1));
+      assertTrue((returned.lastIndexOf("Found 0") == -1),
+          " -ls does not print out anything ");
       out.reset();
       argv[0] = "-ls";
       argv[1] = "/user/nonxistant/*";
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
+      assertEquals(1, ret, " -ls on nonexistent glob returns 1");
       out.reset();
       argv[0] = "-mkdir";
       argv[1] = "/testdir";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertEquals(" -mkdir returned 1 ", 1, ret);
-      assertTrue(" -mkdir returned File exists",
-          (returned.lastIndexOf("File exists") != -1));
+      assertEquals(1, ret, " -mkdir returned 1 ");
+      assertTrue((returned.lastIndexOf("File exists") != -1),
+          " -mkdir returned File exists");
       Path testFile = new Path("/testfile");
       OutputStream outtmp = dfs.create(testFile);
       outtmp.write(testFile.toString().getBytes());
@@ -718,24 +719,24 @@
       argv[1] = "/testfile";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertEquals(" -mkdir returned 1", 1, ret);
-      assertTrue(" -mkdir returned this is a file ",
-          (returned.lastIndexOf("not a directory") != -1));
+      assertEquals(1, ret, " -mkdir returned 1");
+      assertTrue((returned.lastIndexOf("not a directory") != -1),
+          " -mkdir returned this is a file ");
       out.reset();
       argv[0] = "-mkdir";
       argv[1] = "/testParent/testChild";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertEquals(" -mkdir returned 1", 1, ret);
-      assertTrue(" -mkdir returned there is No file or directory but has testChild in the path",
-          (returned.lastIndexOf("testChild") == -1));
+      assertEquals(1, ret, " -mkdir returned 1");
+      assertTrue((returned.lastIndexOf("testChild") == -1),
+          " -mkdir returned there is No file or directory but has testChild in the path");
       out.reset();
       argv = new String[3];
       argv[0] = "-mv";
       argv[1] = "/testfile";
       argv[2] = "/no-such-dir/file";
       ret = ToolRunner.run(shell, argv);
-      assertEquals("mv failed to rename", 1,  ret);
+      assertEquals(1, ret, "mv failed to rename");
       out.reset();
       argv = new String[3];
       argv[0] = "-mv";
@@ -743,25 +744,25 @@
       argv[2] = "/testfiletest";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue("no output from rename",
-          (returned.lastIndexOf("Renamed") == -1));
+      assertTrue((returned.lastIndexOf("Renamed") == -1),
+          "no output from rename");
       out.reset();
       argv[0] = "-mv";
       argv[1] = "/testfile";
       argv[2] = "/testfiletmp";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertTrue(" unix like output",
-          (returned.lastIndexOf("No such file or") != -1));
+      assertTrue((returned.lastIndexOf("No such file or") != -1),
+          " unix like output");
       out.reset();
       argv = new String[1];
       argv[0] = "-du";
       dfs.mkdirs(dfs.getHomeDirectory());
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertEquals(" no error ", 0, ret);
-      assertTrue("empty path specified",
-          (returned.lastIndexOf("empty string") == -1));
+      assertEquals(0, ret, " no error ");
+      assertTrue((returned.lastIndexOf("empty string") == -1),
+          "empty path specified");
       out.reset();
       argv = new String[3];
       argv[0] = "-test";
@@ -769,7 +770,7 @@
       argv[2] = "/no/such/dir";
       ret = ToolRunner.run(shell, argv);
       returned = out.toString();
-      assertEquals(" -test -d wrong result ", 1, ret);
+      assertEquals(1, ret, " -test -d wrong result ");
       assertTrue(returned.isEmpty());
     } finally {
       if (bak != null) {
@@ -802,7 +803,7 @@
       argv[1] = srcFs.getUri() + "/testfile";
       argv[2] = "hdfs://" + srcFs.getUri().getHost() + "/testfile2";
       int ret = ToolRunner.run(shell, argv);
-      assertEquals("mv should have succeeded", 0, ret);
+      assertEquals(0, ret, "mv should have succeeded");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -831,19 +832,19 @@
       argv[0] = "-ls";
       argv[1] = dstFs.getUri().toString() + "/";
       int ret = ToolRunner.run(shell, argv);
-      assertEquals("ls works on remote uri ", 0, ret);
+      assertEquals(0, ret, "ls works on remote uri ");
       //check for rm -r
       dstFs.mkdirs(new Path("/hadoopdir"));
       argv = new String[2];
       argv[0] = "-rmr";
       argv[1] = dstFs.getUri().toString() + "/hadoopdir";
       ret = ToolRunner.run(shell, argv);
-      assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
+      assertEquals(0, ret, "-rmr works on remote uri " + argv[1]);
       //check du
       argv[0] = "-du";
       argv[1] = dstFs.getUri().toString() + "/";
       ret = ToolRunner.run(shell, argv);
-      assertEquals("du works on remote uri ", 0, ret);
+      assertEquals(0, ret, "du works on remote uri ");
       //check put
       File furi = new File(TEST_ROOT_DIR, "furi");
       createLocalFile(furi);
@@ -852,20 +853,20 @@
       argv[1] = furi.toURI().toString();
       argv[2] = dstFs.getUri().toString() + "/furi";
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" put is working ", 0, ret);
+      assertEquals(0, ret, " put is working ");
       //check cp
       argv[0] = "-cp";
       argv[1] = dstFs.getUri().toString() + "/furi";
       argv[2] = srcFs.getUri().toString() + "/furi";
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" cp is working ", 0, ret);
+      assertEquals(0, ret, " cp is working ");
       assertTrue(srcFs.exists(new Path("/furi")));
       //check cat
       argv = new String[2];
       argv[0] = "-cat";
       argv[1] = dstFs.getUri().toString() + "/furi";
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" cat is working ", 0, ret);
+      assertEquals(0, ret, " cat is working ");
       //check chown
       dstFs.delete(new Path("/furi"), true);
       dstFs.delete(new Path("/hadoopdir"), true);
@@ -882,15 +883,15 @@
       argv[0] = "-cat";
       argv[1] = "hdfs:///furi";
       ret = ToolRunner.run(shell, argv);
-      assertEquals(" default works for cat", 0, ret);
+      assertEquals(0, ret, " default works for cat");
       argv[0] = "-ls";
       argv[1] = "hdfs:///";
       ret = ToolRunner.run(shell, argv);
-      assertEquals("default works for ls ", 0, ret);
+      assertEquals(0, ret, "default works for ls ");
       argv[0] = "-rmr";
       argv[1] = "hdfs:///furi";
       ret = ToolRunner.run(shell, argv);
-      assertEquals("default works for rm/rmr", 0, ret);
+      assertEquals(0, ret, "default works for rm/rmr");
     } finally {
       if (null != srcCluster) {
         srcCluster.shutdown();
@@ -919,12 +920,12 @@
     final String[] argv = new String[]{"-head", testFile.toString()};
     final int ret = ToolRunner.run(new FsShell(dfs.getConf()), argv);
 
-    assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
-    assertEquals("-head returned " + out.size() + " bytes data, expected 1KB",
-            1024, out.size());
-    // tailed out last 1KB of the file content
-    assertArrayEquals("Head output doesn't match input",
-            text.substring(0, 1024).getBytes(), out.toByteArray());
+    assertEquals(0, ret, Arrays.toString(argv) + " returned " + ret);
+    assertEquals(1024, out.size(),
+        "-head returned " + out.size() + " bytes data, expected 1KB");
+    // -head should print out the first 1KB of the file content
+    assertArrayEquals(text.substring(0, 1024).getBytes(),
+        out.toByteArray(), "Head output doesn't match input");
     out.reset();
   }
 
@@ -946,12 +947,12 @@
     final String[] argv = new String[]{"-tail", testFile.toString()};
     final int ret = ToolRunner.run(new FsShell(dfs.getConf()), argv);
 
-    assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
-    assertEquals("-tail returned " + out.size() + " bytes data, expected 1KB",
-        1024, out.size());
-    // tailed out last 1KB of the file content
-    assertArrayEquals("Tail output doesn't match input",
-        text.substring(fileLen - 1024).getBytes(), out.toByteArray());
+    assertEquals(0, ret, Arrays.toString(argv) + " returned " + ret);
+    assertEquals(1024, out.size(),
+        "-tail returned " + out.size() + " bytes data, expected 1KB");
+    // tailed out last 1KB of the file content
+    assertArrayEquals(text.substring(fileLen - 1024).getBytes(),
+        out.toByteArray(), "Tail output doesn't match input");
     out.reset();
   }
 
@@ -1038,9 +1039,9 @@
       argv[0] = "-text";
       argv[1] = new Path(root, "file.gz").toString();
       int ret = ToolRunner.run(new FsShell(conf), argv);
-      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
-      assertTrue("Output doesn't match input",
-          Arrays.equals(file.toByteArray(), out.toByteArray()));
+      assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+      assertTrue(Arrays.equals(file.toByteArray(), out.toByteArray()),
+          "Output doesn't match input");
 
       // Create a sequence file with a gz extension, to test proper
       // container detection. Magic detection.
@@ -1057,9 +1058,9 @@
       argv[0] = "-text";
       argv[1] = new Path(root, "file.gz").toString();
       ret = ToolRunner.run(new FsShell(conf), argv);
-      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
-      assertTrue("Output doesn't match input",
-          Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
+      assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+      assertTrue(Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()),
+          "Output doesn't match input");
       out.reset();
 
       // Test deflate. Extension-based detection.
@@ -1074,9 +1075,9 @@
       argv[0] = "-text";
       argv[1] = new Path(root, "file.deflate").toString();
       ret = ToolRunner.run(new FsShell(conf), argv);
-      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
-      assertTrue("Output doesn't match input",
-          Arrays.equals(outbytes, out.toByteArray()));
+      assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+      assertTrue(Arrays.equals(outbytes, out.toByteArray()),
+          "Output doesn't match input");
       out.reset();
 
       // Test a simple codec. Extension based detection. We use
@@ -1095,9 +1096,9 @@
       argv[0] = "-text";
       argv[1] = new Path(root, p).toString();
       ret = ToolRunner.run(new FsShell(conf), argv);
-      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
-      assertTrue("Output doesn't match input",
-          Arrays.equals(writebytes, out.toByteArray()));
+      assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+      assertTrue(Arrays.equals(writebytes, out.toByteArray()),
+          "Output doesn't match input");
       out.reset();
 
       // Test a plain text.
@@ -1111,9 +1112,9 @@
       argv[0] = "-text";
       argv[1] = new Path(root, "file.txt").toString();
       ret = ToolRunner.run(new FsShell(conf), argv);
-      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
-      assertTrue("Output doesn't match input",
-          Arrays.equals(writebytes, out.toByteArray()));
+      assertEquals(0, ret, "'-text " + argv[1] + " returned " + ret);
+      assertTrue(Arrays.equals(writebytes, out.toByteArray()),
+          "Output doesn't match input");
       out.reset();
     } finally {
       if (null != bak) {
@@ -1142,7 +1143,7 @@
       assertTrue(out.toString().contains(StringUtils
           .byteToHexString(checksum.getBytes(), 0, checksum.getLength())));
     } finally {
-      Assert.assertNotNull(printStream);
+      Assertions.assertNotNull(printStream);
       System.setOut(printStream);
     }
   }
@@ -1167,22 +1168,22 @@
       File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");
 
       File f1 = new File(localroot, "f1");
-      assertTrue("Copying failed.", f1.isFile());
+        assertTrue(f1.isFile(), "Copying failed.");
 
       File f2 = new File(localroot, "f2");
-      assertTrue("Copying failed.", f2.isFile());
+        assertTrue(f2.isFile(), "Copying failed.");
 
       File sub = new File(localroot, "sub");
-      assertTrue("Copying failed.", sub.isDirectory());
+        assertTrue(sub.isDirectory(), "Copying failed.");
 
       File f3 = new File(sub, "f3");
-      assertTrue("Copying failed.", f3.isFile());
+        assertTrue(f3.isFile(), "Copying failed.");
 
       File f4 = new File(sub, "f4");
-      assertTrue("Copying failed.", f4.isFile());
+        assertTrue(f4.isFile(), "Copying failed.");
 
       File f5 = new File(localroot2, "f1");
-      assertTrue("Copying failed.", f5.isFile());
+        assertTrue(f5.isFile(), "Copying failed.");
 
       f1.delete();
       f2.delete();
@@ -1945,10 +1946,10 @@
           args[0] = "-ls";
           args[1] = "/foo";
           int ret = ToolRunner.run(fshell, args);
-          assertEquals("returned should be 1", 1, ret);
+            assertEquals(1, ret, "returned should be 1");
           String str = out.toString();
-          assertTrue("permission denied printed",
-                     str.indexOf("Permission denied") != -1);
+            assertTrue(
+                    str.indexOf("Permission denied") != -1, "permission denied printed");
           out.reset();
           return null;
         }
@@ -1989,7 +1990,7 @@
     	  try {
     	    assertEquals(exitcode, shell.run(args));
     	  } catch (Exception e) {
-    	    assertTrue(StringUtils.stringifyException(e), false);
+          fail(StringUtils.stringifyException(e));
     	  }
     	  return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
     	}
@@ -2079,43 +2080,43 @@
 
     out.reset();
     doFsStat(dfs.getConf(), null, testDir1);
-    assertEquals("Unexpected -stat output: " + out,
-        out.toString(), String.format("%s%n", mtime1));
+    assertEquals(out.toString(), String.format("%s%n", mtime1),
+        "Unexpected -stat output: " + out);
 
     out.reset();
     doFsStat(dfs.getConf(), null, testDir1, testFile2);
-    assertEquals("Unexpected -stat output: " + out,
-        out.toString(), String.format("%s%n%s%n", mtime1, mtime2));
+    assertEquals(out.toString(), String.format("%s%n%s%n", mtime1, mtime2),
+        "Unexpected -stat output: " + out);
 
     doFsStat(dfs.getConf(), "%F %u:%g %b %y %n");
     out.reset();
 
     doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1);
-    assertTrue(out.toString(), out.toString().contains(mtime1));
-    assertTrue(out.toString(), out.toString().contains("directory"));
-    assertTrue(out.toString(), out.toString().contains(status1.getGroup()));
-    assertTrue(out.toString(),
-        out.toString().contains(status1.getPermission().toString()));
+    assertTrue(out.toString().contains(mtime1), out.toString());
+    assertTrue(out.toString().contains("directory"), out.toString());
+    assertTrue(out.toString().contains(status1.getGroup()), out.toString());
+    assertTrue(out.toString().contains(status1.getPermission().toString()),
+        out.toString());
 
     int n = status1.getPermission().toShort();
     int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
-    assertTrue(out.toString(),
-        out.toString().contains(String.valueOf(octal)));
+    assertTrue(out.toString().contains(String.valueOf(octal)),
+        out.toString());
 
     out.reset();
     doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);
 
     n = status2.getPermission().toShort();
     octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
-    assertTrue(out.toString(), out.toString().contains(mtime1));
-    assertTrue(out.toString(), out.toString().contains(atime1));
-    assertTrue(out.toString(), out.toString().contains("regular file"));
-    assertTrue(out.toString(),
-        out.toString().contains(status2.getPermission().toString()));
-    assertTrue(out.toString(),
-        out.toString().contains(String.valueOf(octal)));
-    assertTrue(out.toString(), out.toString().contains(mtime2));
-    assertTrue(out.toString(), out.toString().contains(atime2));
+    assertTrue(out.toString().contains(mtime1), out.toString());
+    assertTrue(out.toString().contains(atime1), out.toString());
+    assertTrue(out.toString().contains("regular file"), out.toString());
+    assertTrue(out.toString().contains(status2.getPermission().toString()),
+        out.toString());
+    assertTrue(out.toString().contains(String.valueOf(octal)),
+        out.toString());
+    assertTrue(out.toString().contains(mtime2), out.toString());
+    assertTrue(out.toString().contains(atime2), out.toString());
   }
 
   private static void doFsStat(Configuration conf, String format, Path... files)
@@ -2123,8 +2124,8 @@
     if (files == null || files.length == 0) {
       final String[] argv = (format == null ? new String[] {"-stat"} :
           new String[] {"-stat", format});
-      assertEquals("Should have failed with missing arguments",
-          -1, ToolRunner.run(new FsShell(conf), argv));
+      assertEquals(-1, ToolRunner.run(new FsShell(conf), argv),
+          "Should have failed with missing arguments");
     } else {
       List<String> argv = new LinkedList<>();
       argv.add("-stat");
@@ -2136,7 +2137,7 @@
       }
 
       int ret = ToolRunner.run(new FsShell(conf), argv.toArray(new String[0]));
-      assertEquals(argv + " returned non-zero status " + ret, 0, ret);
+      assertEquals(0, ret, argv + " returned non-zero status " + ret);
     }
   }
 
@@ -2198,7 +2199,7 @@
     DFSAdmin admin = new DFSAdmin();
     admin.setConf(conf);
     int res = admin.run(new String[] {"-refreshNodes"});
-    assertEquals("expected to fail -1", res , -1);
+    assertEquals(-1, res, "expected to fail -1");
   }
 
   // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
@@ -2238,7 +2239,7 @@
       String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
           target1.toUri().toString() };
       int ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -p is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -p is not working");
       FileStatus targetStatus = dfs.getFileStatus(target1);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2257,7 +2258,7 @@
       argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
           target2.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptop is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptop is not working");
       targetStatus = dfs.getFileStatus(target2);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2276,7 +2277,7 @@
       argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
           target3.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptopx is not working");
       targetStatus = dfs.getFileStatus(target3);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2297,7 +2298,7 @@
       argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
           target4.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptopa is not working");
       targetStatus = dfs.getFileStatus(target4);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2317,7 +2318,7 @@
       argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
           target5.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptoa is not working");
       targetStatus = dfs.getFileStatus(target5);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2460,7 +2461,7 @@
         new String[] { "-cp", cpArgs, src.toUri().toString(),
             target.toUri().toString() };
     final int ret = ToolRunner.run(shell, argv);
-    assertEquals("cp -p is not working", expectedExitCode, ret);
+      assertEquals(expectedExitCode, ret, "cp -p is not working");
     return target;
   }
 
@@ -2469,16 +2470,16 @@
     final Map<String, byte[]> xattrs = fs.getXAttrs(target);
     int expectedCount = 0;
     if (expectRaw) {
-      assertArrayEquals("raw.a1 has incorrect value",
-          RAW_A1_VALUE, xattrs.get(RAW_A1));
+        assertArrayEquals(
+                RAW_A1_VALUE, xattrs.get(RAW_A1), "raw.a1 has incorrect value");
       expectedCount++;
     }
     if (expectVanillaXAttrs) {
-      assertArrayEquals("user.a1 has incorrect value",
-          USER_A1_VALUE, xattrs.get(USER_A1));
+        assertArrayEquals(
+                USER_A1_VALUE, xattrs.get(USER_A1), "user.a1 has incorrect value");
       expectedCount++;
     }
-    assertEquals("xattrs size mismatch", expectedCount, xattrs.size());
+      assertEquals(expectedCount, xattrs.size(), "xattrs size mismatch");
   }
 
   // verify cp -ptopxa option will preserve directory attributes.
@@ -2529,7 +2530,7 @@
       String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
           targetDir1.toUri().toString() };
       int ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -p is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -p is not working");
       FileStatus targetStatus = dfs.getFileStatus(targetDir1);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2548,7 +2549,7 @@
       argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
           targetDir2.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptop is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptop is not working");
       targetStatus = dfs.getFileStatus(targetDir2);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2567,7 +2568,7 @@
       argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
           targetDir3.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptopx is not working");
       targetStatus = dfs.getFileStatus(targetDir3);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2588,7 +2589,7 @@
       argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
           targetDir4.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptopa is not working");
       targetStatus = dfs.getFileStatus(targetDir4);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2608,7 +2609,7 @@
       argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
           targetDir5.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptoa is not working");
       targetStatus = dfs.getFileStatus(targetDir5);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2666,7 +2667,7 @@
       String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
           target1.toUri().toString() };
       int ret = ToolRunner.run(shell, argv);
-      assertEquals("cp is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp is not working");
       FileStatus targetStatus = dfs.getFileStatus(target1);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2683,7 +2684,7 @@
       argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
           target2.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+        assertEquals(SUCCESS, ret, "cp -ptopa is not working");
       targetStatus = dfs.getFileStatus(target2);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
@@ -2720,33 +2721,33 @@
       // Tests for put
       String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
       int res = ToolRunner.run(shell, argv);
-      assertEquals("put -f is not working", SUCCESS, res);
+      assertEquals(SUCCESS, res, "put -f is not working");
 
       argv = new String[] { "-put", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
-      assertEquals("put command itself is able to overwrite the file", ERROR,
-          res);
+      assertEquals(ERROR, res,
+          "put command itself is able to overwrite the file");
 
       // Tests for copyFromLocal
       argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
-      assertEquals("copyFromLocal -f is not working", SUCCESS, res);
+      assertEquals(SUCCESS, res, "copyFromLocal -f is not working");
 
       argv = new String[] { "-copyFromLocal", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
-      assertEquals(
-          "copyFromLocal command itself is able to overwrite the file", ERROR,
-          res);
+      assertEquals(ERROR, res,
+          "copyFromLocal command itself is able to overwrite the"
+              + " file");
 
       // Tests for cp
       argv = new String[] { "-cp", "-f", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
-      assertEquals("cp -f is not working", SUCCESS, res);
+      assertEquals(SUCCESS, res, "cp -f is not working");
 
       argv = new String[] { "-cp", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
-      assertEquals("cp command itself is able to overwrite the file", ERROR,
-          res);
+      assertEquals(ERROR, res,
+          "cp command itself is able to overwrite the file");
     } finally {
       if (null != shell)
         shell.close();
@@ -2788,18 +2789,18 @@
       // Tests for put
       String[] argv = new String[] { "-put", localfilepath, testdir };
       int res = ToolRunner.run(shell, argv);
-      assertEquals("put is working", ERROR, res);
+        assertEquals(ERROR, res, "put is working");
       String returned = out.toString();
-      assertTrue(" outputs Permission denied error message",
-          (returned.lastIndexOf("Permission denied") != -1));
+        assertTrue(
+                (returned.lastIndexOf("Permission denied") != -1), " outputs Permission denied error message");
 
       // Tests for copyFromLocal
       argv = new String[] { "-copyFromLocal", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
-      assertEquals("copyFromLocal -f is working", ERROR, res);
+        assertEquals(ERROR, res, "copyFromLocal -f is working");
       returned = out.toString();
-      assertTrue(" outputs Permission denied error message",
-          (returned.lastIndexOf("Permission denied") != -1));
+        assertTrue(
+                (returned.lastIndexOf("Permission denied") != -1), " outputs Permission denied error message");
 
     } finally {
       if (bak != null) {
@@ -2841,8 +2842,8 @@
     try {
       final FileSystem fs = cluster.getFileSystem();
 
-      assertTrue("Unable to create test directory",
-          fs.mkdirs(new Path(testdir)));
+        assertTrue(
+                fs.mkdirs(new Path(testdir)), "Unable to create test directory");
 
       fs.create(hdfsFile, true).close();
 
@@ -2856,18 +2857,18 @@
       final String[] argv = new String[] { "-setrep", "1", hdfsFile.toString() };
 
       try {
-        assertEquals("Command did not return the expected exit code",
-            1, shell.run(argv));
+          assertEquals(
+                  1, shell.run(argv), "Command did not return the expected exit code");
       } finally {
         System.setOut(origOut);
         System.setErr(origErr);
       }
 
-      assertTrue("Error message is not the expected error message"
-          + bao.toString(), bao.toString().startsWith(
-              "setrep: Requested replication factor of 1 is less than "
-                  + "the required minimum of 2 for /tmp/TestDFSShell-"
-                  + "testSetrepLow/testFileForSetrepLow"));
+      assertTrue(bao.toString().startsWith(
+          "setrep: Requested replication factor of 1 is less than "
+              + "the required minimum of 2 for /tmp/TestDFSShell-"
+              + "testSetrepLow/testFileForSetrepLow"),
+          "Error message is not the expected error message" + bao.toString());
     } finally {
       shell.close();
       cluster.shutdown();
@@ -2951,19 +2952,19 @@
       final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
       String[] argv = new String[] { "-rm", testFile };
       int res = ToolRunner.run(shell, argv);
-      assertEquals("rm failed", 0, res);
+        assertEquals(0, res, "rm failed");
 
       if (serverTrash) {
-        // If the server config was set we should use it unconditionally
-        assertTrue("File not in trash", fs.exists(new Path(trashFile)));
+          // If the server config was set we should use it unconditionally
+          assertTrue(fs.exists(new Path(trashFile)), "File not in trash");
       } else if (clientTrash) {
-        // If the server config was not set but the client config was
-        // set then we should use it
-        assertTrue("File not in trashed", fs.exists(new Path(trashFile)));
+          // If the server config was not set but the client config was
+          // set then we should use it
+          assertTrue(fs.exists(new Path(trashFile)), "File not in trashed");
       } else {
-        // If neither was set then we should not have trashed the file
-        assertFalse("File was not removed", fs.exists(new Path(testFile)));
-        assertFalse("File was trashed", fs.exists(new Path(trashFile)));
+          // If neither was set then we should not have trashed the file
+          assertFalse(fs.exists(new Path(testFile)), "File was not removed");
+          assertFalse(fs.exists(new Path(trashFile)), "File was trashed");
       }
     } finally {
       if (fs != null) {
@@ -2993,8 +2994,8 @@
 
     try {
       FileSystem dfs = cluster.getFileSystem();
-      assertTrue("Not a HDFS: " + dfs.getUri(),
-                 dfs instanceof DistributedFileSystem);
+        assertTrue(
+                dfs instanceof DistributedFileSystem, "Not a HDFS: " + dfs.getUri());
 
       // Run appendToFile once, make sure that the target file is
       // created and is of the right size.
@@ -3063,10 +3064,10 @@
         public Object run() throws Exception {
           int ret = ToolRunner.run(fshell, new String[]{
               "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
-          assertEquals("Returned should be 1", 1, ret);
+            assertEquals(1, ret, "Returned should be 1");
           String str = out.toString();
-          assertTrue("Permission denied printed",
-              str.indexOf("Permission denied") != -1);
+            assertTrue(
+                    str.indexOf("Permission denied") != -1, "Permission denied printed");
           out.reset();
           return null;
         }
@@ -3074,7 +3075,7 @@
 
       int ret = ToolRunner.run(fshell, new String[]{
           "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
-      assertEquals("Returned should be 0", 0, ret);
+        assertEquals(0, ret, "Returned should be 0");
       out.reset();
 
       // No permission to read and remove
@@ -3085,18 +3086,18 @@
           // Read
           int ret = ToolRunner.run(fshell, new String[]{
               "-getfattr", "-n", "user.a1", "/foo"});
-          assertEquals("Returned should be 1", 1, ret);
+            assertEquals(1, ret, "Returned should be 1");
           String str = out.toString();
-          assertTrue("Permission denied printed",
-              str.indexOf("Permission denied") != -1);
+            assertTrue(
+                    str.indexOf("Permission denied") != -1, "Permission denied printed");
           out.reset();
           // Remove
           ret = ToolRunner.run(fshell, new String[]{
               "-setfattr", "-x", "user.a1", "/foo"});
-          assertEquals("Returned should be 1", 1, ret);
+            assertEquals(1, ret, "Returned should be 1");
           str = out.toString();
-          assertTrue("Permission denied printed",
-              str.indexOf("Permission denied") != -1);
+            assertTrue(
+                    str.indexOf("Permission denied") != -1, "Permission denied printed");
           out.reset();
           return null;
         }
@@ -3183,8 +3184,8 @@
         ("Incorrect results from getfattr. Expected: ");
       sb.append(expect).append(" Full Result: ");
       sb.append(str);
-      assertTrue(sb.toString(),
-        str.indexOf(expect) != -1);
+        assertTrue(
+                str.indexOf(expect) != -1, sb.toString());
     }
 
     for (int i = 0; i < dontExpectArr.length; i++) {
@@ -3193,8 +3194,8 @@
         ("Incorrect results from getfattr. Didn't Expect: ");
       sb.append(dontExpect).append(" Full Result: ");
       sb.append(str);
-      assertTrue(sb.toString(),
-        str.indexOf(dontExpect) == -1);
+        assertTrue(
+                str.indexOf(dontExpect) == -1, sb.toString());
     }
     out.reset();
   }
@@ -3250,7 +3251,7 @@
         public Object run() throws Exception {
           final int ret = ToolRunner.run(fshell, new String[]{
               "-mkdir", root + "/foo"});
-          assertEquals("Return should be 0", 0, ret);
+            assertEquals(0, ret, "Return should be 0");
           out.reset();
           return null;
         }
@@ -3263,7 +3264,7 @@
           // Give access to "other"
           final int ret = ToolRunner.run(fshell, new String[]{
               "-chmod", "707", root + "/foo"});
-          assertEquals("Return should be 0", 0, ret);
+            assertEquals(0, ret, "Return should be 0");
           out.reset();
           return null;
         }
@@ -3276,7 +3277,7 @@
         public Object run() throws Exception {
           final int ret = ToolRunner.run(fshell, new String[]{
               "-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
-          assertEquals("Returned should be 0", 0, ret);
+            assertEquals(0, ret, "Returned should be 0");
           out.reset();
           return null;
         }
@@ -3289,7 +3290,7 @@
         public Object run() throws Exception {
           final int ret = ToolRunner.run(fshell, new String[]{
               "-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
-          assertEquals("Returned should be 0", 0, ret);
+          assertEquals(0, ret, "Returned should be 0");
           out.reset();
           return null;
         }
@@ -3303,12 +3304,12 @@
           // Read
           int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
               "user.a1", root + "/foo" });
-          assertEquals("Returned should be 0", 0, ret);
+          assertEquals(0, ret, "Returned should be 0");
           out.reset();
           // Remove
           ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
               "user.a1", root + "/foo" });
-          assertEquals("Returned should be 0", 0, ret);
+          assertEquals(0, ret, "Returned should be 0");
           out.reset();
           return null;
         }
@@ -3330,7 +3331,7 @@
           // Give access to "other"
           final int ret = ToolRunner.run(fshell, new String[]{
               "-chmod", "700", root + "/foo"});
-          assertEquals("Return should be 0", 0, ret);
+          assertEquals(0, ret, "Return should be 0");
           out.reset();
           return null;
         }
@@ -3344,10 +3345,10 @@
           // set
           int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
               "user.a2", root + "/foo" });
-          assertEquals("Returned should be 1", 1, ret);
+          assertEquals(1, ret, "Returned should be 1");
           final String str = out.toString();
-          assertTrue("Permission denied printed",
-              str.indexOf("Permission denied") != -1);
+          assertTrue(str.indexOf("Permission denied") != -1,
+              "Permission denied printed");
           out.reset();
           return null;
         }
@@ -3361,10 +3362,10 @@
           // set
           int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
               "user.a2", root + "/foo" });
-          assertEquals("Returned should be 1", 1, ret);
+          assertEquals(1, ret, "Returned should be 1");
           final String str = out.toString();
-          assertTrue("Permission denied printed",
-              str.indexOf("Permission denied") != -1);
+          assertTrue(str.indexOf("Permission denied") != -1,
+              "Permission denied printed");
           out.reset();
           return null;
         }
@@ -3377,7 +3378,7 @@
           // set
           int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
               "trusted.a3", root + "/foo" });
-          assertEquals("Returned should be 0", 0, ret);
+          assertEquals(0, ret, "Returned should be 0");
           out.reset();
           return null;
         }
@@ -3415,7 +3416,7 @@
       {
         final int ret = ToolRunner.run(fshell, new String[] {
             "-setfattr", "-n", "user.a1", "-v", "1234", p.toString()});
-        assertEquals("Returned should be 0", 0, ret);
+        assertEquals(0, ret, "Returned should be 0");
         out.reset();
       }
 
@@ -3425,8 +3426,8 @@
             int ret = ToolRunner.run(fshell, new String[] {
                 "-getfattr", "-n", "user.a1", p.toString()});
             String str = out.toString();
-            assertTrue("xattr value was incorrectly returned",
-                str.indexOf("1234") == -1);
+            assertTrue(str.indexOf("1234") == -1,
+                "xattr value was incorrectly returned");
             out.reset();
             return null;
           }
@@ -3436,10 +3437,10 @@
         final int ret = ToolRunner.run(fshell, new String[]{
             "-getfattr", "-n", "user.nonexistent", p.toString()});
         String str = out.toString();
-        assertTrue("xattr value was incorrectly returned",
-          str.indexOf(
-              "getfattr: At least one of the attributes provided was not found")
-               >= 0);
+        assertTrue(str.indexOf(
+            "getfattr: At least one of the attributes provided was not found")
+            >= 0,
+            "xattr value was incorrectly returned");
         out.reset();
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
index 282dcf7..fd63894 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -29,7 +29,7 @@
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestDFSShellGenericOptions {
 
@@ -103,8 +103,8 @@
       ToolRunner.run(shell, args);
       fs = FileSystem.get(DFSUtilClient.getNNUri(
           DFSUtilClient.getNNAddress(namenode)), shell.getConf());
-      assertTrue("Directory does not get created",
-                 fs.isDirectory(new Path("/data")));
+      assertTrue(fs.isDirectory(new Path("/data")),
+          "Directory does not get created");
       fs.delete(new Path("/data"), true);
     } catch (Exception e) {
       System.err.println(e.getMessage());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
index 860794c..0ae8aa7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
@@ -19,8 +19,8 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 
@@ -33,8 +33,8 @@
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 
 /**
  * This test ensures the appropriate response (successful or failure) from 
@@ -280,7 +280,7 @@
     }
   }
   
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
index 60839dc..aa49669 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
@@ -19,10 +19,7 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -33,15 +30,15 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
-* This test ensures the appropriate response (successful or failure) from
-* the system when the system is started under various storage state and
-* version conditions.
-*/
+ * This test ensures the appropriate response (successful or failure) from
+ * the system when the system is started under various storage state and
+ * version conditions.
+ */
 public class TestDFSStorageStateRecovery {
  
   private static final Logger LOG = LoggerFactory.getLogger(
@@ -443,13 +440,13 @@
     } // end numDirs loop
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     LOG.info("Setting up the directory structures.");
     UpgradeUtilities.initialize();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index aedea3c..c72ff01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -40,11 +40,11 @@
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
@@ -54,11 +54,7 @@
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -88,7 +84,7 @@
     return StripedFileTestUtil.getDefaultECPolicy();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     /*
      * Initialize erasure coding policy.
@@ -128,7 +124,7 @@
         .setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -214,9 +210,9 @@
       int ret = in.read(startOffset, buf, 0, fileLen);
       assertEquals(remaining, ret);
       for (int i = 0; i < remaining; i++) {
-        Assert.assertEquals("Byte at " + (startOffset + i) + " should be the " +
-                "same",
-            expected[startOffset + i], buf[i]);
+        Assertions.assertEquals(expected[startOffset + i], buf[i],
+            "Byte at " + (startOffset + i) + " should be the " +
+            "same");
       }
     }
     in.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 9044a6d..d83e0df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.doThrow;
 
 import java.io.ByteArrayInputStream;
@@ -47,10 +45,10 @@
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.event.Level;
 
@@ -81,7 +79,7 @@
     return StripedFileTestUtil.getDefaultECPolicy();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     /*
      * Initialize erasure coding policy.
@@ -110,7 +108,7 @@
     fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -206,12 +204,12 @@
     final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
         dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
     try (FSDataOutputStream os = fs.create(new Path("/ec-file-1"))) {
-      assertFalse(
-          "DFSStripedOutputStream should not have hflush() capability yet!",
-          os.hasCapability(StreamCapability.HFLUSH.getValue()));
-      assertFalse(
-          "DFSStripedOutputStream should not have hsync() capability yet!",
-          os.hasCapability(StreamCapability.HSYNC.getValue()));
+      assertFalse(
+          os.hasCapability(StreamCapability.HFLUSH.getValue()),
+          "DFSStripedOutputStream should not have hflush() capability yet!");
+      assertFalse(
+          os.hasCapability(StreamCapability.HSYNC.getValue()),
+          "DFSStripedOutputStream should not have hsync() capability yet!");
       try (InputStream is = new ByteArrayInputStream(bytes)) {
         IOUtils.copyBytes(is, os, bytes.length);
         os.hflush();
@@ -219,8 +217,8 @@
         os.hsync();
         IOUtils.copyBytes(is, os, bytes.length);
       }
-      assertTrue("stream is not a DFSStripedOutputStream",
-          os.getWrappedStream() instanceof DFSStripedOutputStream);
+      assertTrue(os.getWrappedStream() instanceof DFSStripedOutputStream,
+          "stream is not a DFSStripedOutputStream");
       final DFSStripedOutputStream dfssos =
           (DFSStripedOutputStream) os.getWrappedStream();
       dfssos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
@@ -265,8 +263,8 @@
         spyClient.create("/testExceptionInCloseECFileWithRecoverLease",
             FsPermission.getFileDefault(), EnumSet.of(CreateFlag.CREATE),
             (short) 3, 1024*1024, null, 1024, null);
-    assertTrue("stream should be a DFSStripedOutputStream",
-        dfsOutputStream instanceof DFSStripedOutputStream);
+    assertTrue(dfsOutputStream instanceof DFSStripedOutputStream,
+        "stream should be a DFSStripedOutputStream");
     DFSOutputStream spyDFSOutputStream = Mockito.spy(dfsOutputStream);
     doThrow(new IOException("Emulated IOException in close"))
         .when(spyDFSOutputStream).completeFile(Mockito.any());
@@ -290,8 +288,8 @@
         spyClient.create("/testExceptionInCloseECFileWithoutRecoverLease",
             FsPermission.getFileDefault(), EnumSet.of(CreateFlag.CREATE),
             (short) 3, 1024*1024, null, 1024, null);
-    assertTrue("stream should be a DFSStripedOutputStream",
-        dfsOutputStream instanceof DFSStripedOutputStream);
+    assertTrue(dfsOutputStream instanceof DFSStripedOutputStream,
+        "stream should be a DFSStripedOutputStream");
     DFSOutputStream spyDFSOutputStream = Mockito.spy(dfsOutputStream);
     doThrow(new IOException("Emulated IOException in close"))
         .when(spyDFSOutputStream).completeFile(Mockito.any());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index ff52146..0053f80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -33,8 +33,8 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test striped file write operation with data node failures with fixed
@@ -105,7 +105,7 @@
       cluster.triggerHeartbeats();
       DatanodeInfo[] info = dfs.getClient().datanodeReport(
           DatanodeReportType.LIVE);
-      assertEquals("Mismatches number of live Dns", numDatanodes, info.length);
+      assertEquals(numDatanodes, info.length, "Mismatches number of live Dns");
       final Path dirFile = new Path(dir, "ecfile");
       LambdaTestUtils.intercept(
           IOException.class,
@@ -218,7 +218,7 @@
       cluster.triggerHeartbeats();
       DatanodeInfo[] info = dfs.getClient().datanodeReport(
           DatanodeReportType.LIVE);
-      assertEquals("Mismatches number of live Dns", numDatanodes, info.length);
+      assertEquals(numDatanodes, info.length, "Mismatches number of live Dns");
       Path srcPath = new Path(dir, "testAddBlockWhenNoSufficientParityNodes");
       int fileLength = cellSize - 1000;
       final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
index bbe991d..2f2878c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
@@ -41,8 +41,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
-import org.junit.Assert;
-import org.junit.Before;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -54,8 +54,8 @@
 import java.util.Stack;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Base class for test striped file write operation.
@@ -95,7 +95,7 @@
   /*
    * Initialize erasure coding policy.
    */
-  @Before
+  @BeforeEach
   public void init() {
     ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
     dataBlocks = ecPolicy.getNumDataUnits();
@@ -245,7 +245,7 @@
         final String err = "failed, dn=" + dn + ", length=" + length
             + StringUtils.stringifyException(e);
         LOG.error(err);
-        Assert.fail(err);
+        Assertions.fail(err);
       } finally {
         tearDown();
       }
@@ -389,15 +389,15 @@
       }
 
       if (datanodes != null) {
-        Assert.assertEquals(1, datanodes.length);
-        Assert.assertNotNull(datanodes[0]);
+        Assertions.assertEquals(1, datanodes.length);
+        Assertions.assertNotNull(datanodes[0]);
         return datanodes[0];
       }
 
       try {
         Thread.sleep(100);
       } catch (InterruptedException ie) {
-        Assert.fail(StringUtils.stringifyException(ie));
+        Assertions.fail(StringUtils.stringifyException(ie));
         return null;
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index ddf5461..a3972bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -22,10 +22,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.test.GenericTestUtils.assertExists;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -45,18 +42,17 @@
 import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
 /**
-* This test ensures the appropriate response (successful or failure) from
-* the system when the system is upgraded under various storage state and
-* version conditions.
-*/
+ * This test ensures the appropriate response (successful or failure) from
+ * the system when the system is upgraded under various storage state and
+ * version conditions.
+ */
 public class TestDFSUpgrade {
  
   // TODO: Avoid hard-coding expected_txid. The test should be more robust.
@@ -171,16 +167,16 @@
     } catch (Exception e) {
       // expect exception
       if (exceptionClass != null) {
-        assertTrue("Caught exception is not of expected class "
-            + exceptionClass.getSimpleName() + ": "
-            + StringUtils.stringifyException(e), 
-            exceptionClass.isInstance(e));
+        assertTrue(exceptionClass.isInstance(e),
+            "Caught exception is not of expected class "
+            + exceptionClass.getSimpleName() + ": "
+            + StringUtils.stringifyException(e));
       }
       if (messagePattern != null) {
-        assertTrue("Caught exception message string does not match expected pattern \""
-            + messagePattern.pattern() + "\" : "
-            + StringUtils.stringifyException(e), 
-            messagePattern.matcher(e.getMessage()).find());
+        assertTrue(messagePattern.matcher(e.getMessage()).find(),
+            "Caught exception message string does not match expected pattern \""
+            + messagePattern.pattern() + "\" : "
+            + StringUtils.stringifyException(e));
       }
       LOG.info("Successfully detected expected NameNode startup failure.");
     }
@@ -195,8 +191,8 @@
    */
   void startBlockPoolShouldFail(StartupOption operation, String bpid) throws IOException {
     cluster.startDataNodes(conf, 1, false, operation, null); // should fail
-    assertFalse("Block pool " + bpid + " should have failed to start",
-        cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
+    assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid),
+        "Block pool " + bpid + " should have failed to start");
   }
  
   /**
@@ -212,7 +208,7 @@
                                            .build();
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void initialize() throws Exception {
     UpgradeUtilities.initialize();
   }
@@ -404,7 +400,7 @@
    * Stand-alone test to detect failure of one SD during parallel upgrade.
    * At this time, can only be done with manual hack of {@link FSImage.doUpgrade()}
    */
-  @Ignore
+  @Disabled
   public void testUpgrade4() throws Exception {
     int numDirs = 4;
     conf = new HdfsConfiguration();
@@ -432,7 +428,7 @@
       File currentDir = new File(baseDir, "current");
       for (File f : currentDir.listFiles()) {
         if (f.getName().startsWith(prefix)) {
-          assertTrue("Deleting " + f, f.delete());
+          assertTrue(f.delete(), "Deleting " + f);
         }
       }
     }
@@ -446,7 +442,7 @@
     fail("Expected IOException is not thrown");
   }
   
-  @Ignore
+  @Disabled
   public void test203LayoutVersion() {
     for (int lv : Storage.LAYOUT_VERSIONS_203) {
       assertTrue(Storage.is203LayoutVersion(lv));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 5469ebb..27a2396 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -48,9 +48,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * This tests data transfer protocol handling in the Datanode. It sends
@@ -161,7 +161,7 @@
       // The paths are expected to be listed in the same order 
       // as they are traversed here.
       assertEquals(info.path, path);
-      assertEquals("Checking checksum for " + path, info.checksum, checksum);
+      assertEquals(info.checksum, checksum, "Checking checksum for " + path);
     }
   }
   
@@ -251,9 +251,9 @@
 
     // Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
     FileUtil.fullyDelete(namenodeStorage);
-    assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
+    assertTrue(namenodeStorage.mkdirs(), "Make " + namenodeStorage);
     File imageDir = new File(namenodeStorage, "image");
-    assertTrue("Make " + imageDir, imageDir.mkdirs());
+    assertTrue(imageDir.mkdirs(), "Make " + imageDir);
 
     // Hex dump of a formatted image from Hadoop 0.3.0
     File imageFile = new File(imageDir, "fsimage");
@@ -333,7 +333,7 @@
       }
       int md5failures = appender.countExceptionsWithMessage(
           " is corrupt with MD5 checksum of ");
-      assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
+      assertEquals(1, md5failures, "Upgrade did not fail with bad MD5");
     }
   }
 
@@ -395,10 +395,10 @@
           }
         }
         for (String s: expected) {
-          assertTrue("Did not find expected path " + s, found.contains(s));
+          assertTrue(found.contains(s), "Did not find expected path " + s);
         }
-        assertEquals("Found an unexpected path while listing filesystem",
-            found.size(), expected.length);
+        assertEquals(found.size(), expected.length,
+            "Found an unexpected path while listing filesystem");
       }
     } finally {
       if (cluster != null) {
@@ -459,10 +459,10 @@
           }
         }
         for (String s: expected) {
-          assertTrue("Did not find expected path " + s, found.contains(s));
+          assertTrue(found.contains(s), "Did not find expected path " + s);
         }
-        assertEquals("Found an unexpected path while listing filesystem",
-            found.size(), expected.length);
+        assertEquals(found.size(), expected.length,
+            "Found an unexpected path while listing filesystem");
       }
     } finally {
       if (cluster != null) {
@@ -554,10 +554,10 @@
           }
         }
         for (String s: expected) {
-          assertTrue("Did not find expected path " + s, found.contains(s));
+          assertTrue(found.contains(s), "Did not find expected path " + s);
         }
-        assertEquals("Found an unexpected path while listing filesystem",
-            found.size(), expected.length);
+        assertEquals(found.size(), expected.length,
+            "Found an unexpected path while listing filesystem");
       }
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 9a024c3..3e5a76d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -38,13 +38,8 @@
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -81,9 +76,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Sets;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestDFSUtil {
 
@@ -96,7 +91,7 @@
   /**
    * Reset to default UGI settings since some tests change them.
    */
-  @Before
+  @BeforeEach
   public void resetUGI() {
     UserGroupInformation.setConfiguration(new Configuration());
   }
@@ -127,8 +122,8 @@
 
     BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
 
-    assertTrue("expected 2 blocks but got " + bs.length,
-               bs.length == 2);
+    assertTrue(bs.length == 2,
+        "expected 2 blocks but got " + bs.length);
 
     int corruptCount = 0;
     for (BlockLocation b: bs) {
@@ -137,8 +132,8 @@
       }
     }
 
-    assertTrue("expected 1 corrupt files but got " + corruptCount,
-        corruptCount == 1);
+    assertTrue(corruptCount == 1,
+        "expected 1 corrupt files but got " + corruptCount);
 
     // test an empty location
     bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
@@ -684,11 +679,11 @@
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
 
     Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 2, uris.size());
-    assertTrue("Missing URI for name service ns1",
-        uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)));
-    assertTrue("Missing URI for service address",
-        uris.contains(new URI("hdfs://" + NN2_ADDR)));
+    assertEquals(2, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)),
+        "Missing URI for name service ns1");
+    assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)),
+        "Missing URI for service address");
 
     conf = new HdfsConfiguration();
     conf.set(DFS_NAMESERVICES, "ns1,ns2");
@@ -721,15 +716,15 @@
         + "IPFailoverProxyProvider");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
-    assertTrue("Missing URI for RPC address",
-        uris.contains(new URI("hdfs://" + NN1_ADDR)));
-    assertTrue("Missing URI for name service ns2",
-        uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
-            NS1_NN_ADDR)));
-    assertTrue("Missing URI for name service ns2",
-        uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
-            NS2_NN_ADDR)));
+    assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+        "Missing URI for RPC address");
+    assertTrue(uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+        NS1_NN_ADDR)),
+        "Missing URI for name service ns2");
+    assertTrue(uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+        NS2_NN_ADDR)),
+        "Missing URI for name service ns2");
 
     /**
      * Second, test ns1 with {@link ConfiguredFailoverProxyProvider} which does
@@ -740,57 +735,57 @@
         + "ConfiguredFailoverProxyProvider");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
-    assertTrue("Missing URI for name service ns1",
-        uris.contains(new URI("hdfs://ns1")));
-    assertTrue("Missing URI for name service ns2",
-        uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
-    assertTrue("Missing URI for RPC address",
-        uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://ns1")),
+        "Missing URI for name service ns1");
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
+        "Missing URI for name service ns2");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+        "Missing URI for RPC address");
 
     // Make sure that non-HDFS URIs in fs.defaultFS don't get included.
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
         "viewfs://vfs-name.example.com");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
-    assertTrue("Missing URI for name service ns1",
-        uris.contains(new URI("hdfs://ns1")));
-    assertTrue("Missing URI for name service ns2",
-        uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
-    assertTrue("Missing URI for RPC address",
-        uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://ns1")),
+        "Missing URI for name service ns1");
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
+        "Missing URI for name service ns2");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+        "Missing URI for RPC address");
 
     // Make sure that an HA URI being the default URI doesn't result in multiple
     // entries being returned.
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
     
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 3, uris.size());
-    assertTrue("Missing URI for name service ns1",
-        uris.contains(new URI("hdfs://ns1")));
-    assertTrue("Missing URI for name service ns2",
-        uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
-    assertTrue("Missing URI for RPC address",
-        uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertEquals(3, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://ns1")),
+        "Missing URI for name service ns1");
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
+        "Missing URI for name service ns2");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+        "Missing URI for RPC address");
 
     // Check that the default URI is returned if there's nothing else to return.
     conf = new HdfsConfiguration();
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
-    assertTrue("Missing URI for RPC address (defaultFS)",
-        uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+        "Missing URI for RPC address (defaultFS)");
 
     // Check that the RPC address is the only address returned when the RPC
     // and the default FS is given.
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN2_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
-    assertTrue("Missing URI for RPC address",
-        uris.contains(new URI("hdfs://" + NN2_ADDR)));
+    assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)),
+        "Missing URI for RPC address");
 
     // Make sure that when a service RPC address is used that is distinct from
     // the client RPC address, and that client RPC address is also used as the
@@ -799,9 +794,9 @@
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
-    assertTrue("Missing URI for service ns1",
-        uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
+        "Missing URI for service ns1");
 
     // Check that when the default FS and service address are given, but
     // the RPC address isn't, that only the service address is returned.
@@ -810,9 +805,9 @@
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
     
     uris = DFSUtil.getInternalNsRpcUris(conf);
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
-    assertTrue("Missing URI for service address",
-        uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
+    assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)),
+        "Missing URI for service address");
   }
 
   @Test
@@ -840,9 +835,9 @@
 
     Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
 
-    assertEquals("Incorrect number of URIs returned", 1, uris.size());
-    assertTrue("Missing URI for name service ns1",
-        uris.contains(new URI("hdfs://ns1")));
+    assertEquals(1, uris.size(), "Incorrect number of URIs returned");
+    assertTrue(uris.contains(new URI("hdfs://ns1")),
+        "Missing URI for name service ns1");
   }
 
   @Test (timeout=15000)
@@ -877,19 +872,19 @@
     HdfsConfiguration conf = new HdfsConfiguration();
     String defaultKey = "default.spengo.key";
     conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
-    assertEquals("Test spnego key in config is null", defaultKey,
-        DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
+    assertEquals(defaultKey, DFSUtil.getSpnegoKeytabKey(conf, defaultKey),
+        "Test spnego key in config is null");
 
     conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
-    assertEquals("Test spnego key is empty", defaultKey,
-        DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
+    assertEquals(defaultKey, DFSUtil.getSpnegoKeytabKey(conf, defaultKey),
+        "Test spnego key is empty");
 
     String spengoKey = "spengo.key";
     conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
         spengoKey);
-    assertEquals("Test spnego key is NOT null",
-        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
-        DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
+    assertEquals(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+        DFSUtil.getSpnegoKeytabKey(conf, defaultKey),
+        "Test spnego key is NOT null");
   }
 
   @Test(timeout=10000)
@@ -1016,15 +1011,15 @@
         DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
 
     // use WebAppUtils as would be used by loadSslConfiguration
-    Assert.assertEquals("keypass",
+    Assertions.assertEquals("keypass",
         DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
-    Assert.assertEquals("storepass",
+    Assertions.assertEquals("storepass",
         DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
-    Assert.assertEquals("trustpass",
+    Assertions.assertEquals("trustpass",
         DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
 
     // let's make sure that a password that doesn't exist returns null
-    Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
+    Assertions.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
   }
 
   @Test
@@ -1065,20 +1060,20 @@
   public void testEncryptionProbe() throws Throwable {
     Configuration conf = new Configuration(false);
     conf.unset(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-    assertFalse("encryption enabled on no provider key",
-        DFSUtilClient.isHDFSEncryptionEnabled(conf));
+    assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+        "encryption enabled on no provider key");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "");
-    assertFalse("encryption enabled on empty provider key",
-        DFSUtilClient.isHDFSEncryptionEnabled(conf));
+    assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+        "encryption enabled on empty provider key");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "\n\t\n");
-    assertFalse("encryption enabled on whitespace provider key",
-        DFSUtilClient.isHDFSEncryptionEnabled(conf));
+    assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+        "encryption enabled on whitespace provider key");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "http://hadoop.apache.org");
-    assertTrue("encryption disabled on valid provider key",
-        DFSUtilClient.isHDFSEncryptionEnabled(conf));
+    assertTrue(DFSUtilClient.isHDFSEncryptionEnabled(conf),
+        "encryption disabled on valid provider key");
 
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index c57ef94..0cb732c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -27,15 +27,15 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestDataStream {
   static MiniDFSCluster cluster;
   static int PACKET_SIZE = 1024;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     Configuration conf = new Configuration();
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
@@ -77,7 +77,7 @@
         "Slow ReadProcessor read fields for block");
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     cluster.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 9881f92..6d4297d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -23,10 +23,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.InputStream;
 
@@ -40,9 +37,9 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.util.function.Supplier;
 
@@ -55,7 +52,7 @@
   private static final int KEEPALIVE_TIMEOUT = 1000;
   private static final int WRITE_TIMEOUT = 3000;
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         KEEPALIVE_TIMEOUT);
@@ -67,7 +64,7 @@
     dn = cluster.getDataNodes().get(0);
   }
   
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index b1a675c..2e5097c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
index 989e9fc..0a56677 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,9 +32,9 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Tests if a data-node can startup depending on configuration parameters.
@@ -47,7 +45,7 @@
 
   private static MiniDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     clearBaseDir();
     Configuration conf = new HdfsConfiguration();
@@ -59,7 +57,7 @@
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if(cluster != null)
       cluster.shutdown();
@@ -95,7 +93,7 @@
         dn.shutdown();
       }
     }
-    assertNull("Data-node startup should have failed.", dn);
+    assertNull(dn, "Data-node startup should have failed.");
 
     // 2. Test "file:" ecPolicy and no ecPolicy (path-only). Both should work.
     String dnDir1 = fileAsURI(dataDir).toString() + "1";
@@ -106,7 +104,7 @@
                 dnDir1 + "," + dnDir2 + "," + dnDir3);
     try {
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
-      assertTrue("Data-node should startup.", cluster.isDataNodeUp());
+      assertTrue(cluster.isDataNodeUp(), "Data-node should startup.");
     } finally {
       if (cluster != null) {
         cluster.shutdownDataNodes();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index c5141f3..ff94dee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -96,7 +96,7 @@
           checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
         } catch (Throwable e) {
           System.out.println("Workload exception " + e);
-          assertTrue(e.toString(), false);
+          assertTrue(false, e.toString());
         }
 
         // increment the stamp to indicate that another file is done.
@@ -148,9 +148,9 @@
     int attempt = 0;
 
     long len = fileSys.getFileStatus(name).getLen();
-    assertTrue(name + " should be of size " + filesize +
-               " but found to be of size " + len, 
-               len == filesize);
+    assertTrue(len == filesize,
+        name + " should be of size " + filesize +
+        " but found to be of size " + len);
 
     // wait till all full blocks are confirmed by the datanodes.
     while (!done) {
@@ -198,9 +198,9 @@
 
   private static void checkData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
@@ -259,7 +259,7 @@
             // cluster.startDataNodes(conf, 1, true, null, null);
           } catch (IOException e) {
             System.out.println("TestDatanodeDeath Modify exception " + e);
-            assertTrue("TestDatanodeDeath Modify exception " + e, false);
+            assertTrue(false, "TestDatanodeDeath Modify exception " + e);
             running = false;
           }
         }
@@ -399,7 +399,7 @@
     } catch (Throwable e) {
       System.out.println("Simple Workload exception " + e);
       e.printStackTrace();
-      assertTrue(e.toString(), false);
+      assertTrue(false, e.toString());
     } finally {
       fs.close();
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
index 0e2f4e4..6d23f3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
@@ -20,7 +20,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index f029ee5..ed9798b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -39,7 +39,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.function.Supplier;
 
@@ -47,7 +47,7 @@
 import java.security.Permission;
 import java.util.concurrent.TimeoutException;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
@@ -88,7 +88,7 @@
       cluster.waitActive();
       
       int initialLookups = sm.lookups;
-      assertTrue("dns security manager is active", initialLookups != 0);
+      assertTrue(initialLookups != 0, "dns security manager is active");
       
       DatanodeManager dm =
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -198,7 +198,7 @@
       rpcServer.registerDatanode(dnReg);
 
       DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
-      assertEquals("Expected a registered datanode", 1, report.length);
+      assertEquals(1, report.length, "Expected a registered datanode");
 
       // register the same datanode again with a different storage ID
       dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
@@ -209,8 +209,8 @@
       rpcServer.registerDatanode(dnReg);
 
       report = client.datanodeReport(DatanodeReportType.ALL);
-      assertEquals("Datanode with changed storage ID not recognized",
-          1, report.length);
+      assertEquals(1, report.length,
+          "Datanode with changed storage ID not recognized");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -366,16 +366,16 @@
       waitForHeartbeat(dn, dnd);
       assertTrue(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
-      assertTrue("block report is not processed for DN " + dnd,
-          waitForBlockReport(dn, dnd));
+      assertTrue(waitForBlockReport(dn, dnd),
+          "block report is not processed for DN " + dnd);
       assertTrue(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
       // check that block report is not processed and registration didn't
       // change.
       dnd.setForceRegistration(true);
-      assertFalse("block report is processed for DN " + dnd,
-          waitForBlockReport(dn, dnd));
+      assertFalse(waitForBlockReport(dn, dnd),
+          "block report is processed for DN " + dnd);
       assertFalse(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
@@ -386,8 +386,8 @@
       newReg = dn.getDNRegistrationForBP(bpId);
       assertNotSame(lastReg, newReg);
       lastReg = newReg;
-      assertTrue("block report is not processed for DN " + dnd,
-          waitForBlockReport(dn, dnd));
+      assertTrue(waitForBlockReport(dn, dnd),
+          "block report is not processed for DN " + dnd);
       assertTrue(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
@@ -407,7 +407,7 @@
       } catch (NullPointerException npe) {
         failed = true;
       }
-      assertTrue("didn't fail", failed);
+      assertTrue(failed, "didn't fail");
       assertFalse(dnd.isRegistered());
 
       // should remain unregistered until next heartbeat.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index de738ee..defd591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -19,7 +19,7 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -42,8 +42,8 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test ensures the all types of data node report work correctly.
@@ -167,7 +167,7 @@
       cluster.corruptBlockOnDataNodesByDeletingBlockFile(b);
       try {
         DFSTestUtil.readFile(fs, p);
-        Assert.fail("Must throw exception as the block doesn't exists on disk");
+        Assertions.fail("Must throw exception as the block doesn't exists on disk");
       } catch (IOException e) {
         // all bad datanodes
       }
@@ -178,7 +178,7 @@
         if (0 != lb.getLocations().length) {
           retries++;
           if (retries > 7) {
-            Assert.fail("getLocatedBlocks failed after 7 retries");
+            Assertions.fail("getLocatedBlocks failed after 7 retries");
           }
           Thread.sleep(2000);
         } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
index 659a8c1..0cd7db0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
@@ -29,9 +29,9 @@
 
 import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.ClusterVerifier;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java
index e8da918..c757a53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java
@@ -26,10 +26,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.io.IOException;
@@ -44,12 +44,7 @@
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_PROBE_SUSPECT_NODE_INTERVAL_MS_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests for dead node detection in DFSClient.
@@ -59,7 +54,7 @@
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     cluster = null;
     conf = new HdfsConfiguration();
@@ -77,7 +72,7 @@
     conf.setLong(DFS_CLIENT_DEAD_NODE_DETECTION_IDLE_SLEEP_MS_KEY, 100);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -328,15 +323,15 @@
       }
       waitForSuspectNode(din.getDFSClient());
       cluster.restartDataNode(one, true);
-      Assert.assertEquals(1,
+      Assertions.assertEquals(1,
           deadNodeDetector.getSuspectNodesProbeQueue().size());
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           deadNodeDetector.clearAndGetDetectedDeadNodes().size());
       deadNodeDetector.startProbeScheduler();
       Thread.sleep(1000);
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           deadNodeDetector.getSuspectNodesProbeQueue().size());
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           deadNodeDetector.clearAndGetDetectedDeadNodes().size());
     } finally {
       in.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index f7e6dce..8af1e13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -17,11 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -77,9 +73,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -106,9 +102,9 @@
   private static String checkFile(FileSystem fileSys, Path name, int repl,
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
-    // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    // need a raw stream
+    assertTrue(fileSys instanceof DistributedFileSystem,
+        "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream)
         fileSys.open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
@@ -223,8 +219,8 @@
 
     // Ensure decommissioned datanode is not automatically shutdown
     DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(
+        DatanodeReportType.LIVE).length, "All datanodes must be alive");
     assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
         numDatanodes));
     cleanupFile(fileSys, file1);
@@ -397,8 +393,8 @@
 
         // Ensure decommissioned datanode is not automatically shutdown
         DFSClient client = getDfsClient(i);
-        assertEquals("All datanodes must be alive", numDatanodes, 
-            client.datanodeReport(DatanodeReportType.LIVE).length);
+        assertEquals(numDatanodes, client.datanodeReport(
+            DatanodeReportType.LIVE).length, "All datanodes must be alive");
         // wait for the block to be replicated
         int tries = 0;
         while (tries++ < 20) {
@@ -411,8 +407,8 @@
           } catch (InterruptedException ie) {
           }
         }
-        assertTrue("Checked if block was replicated after decommission, tried "
-            + tries + " times.", tries < 20);
+        assertTrue(tries < 20, "Checked if block was replicated after "
+            + "decommission, tried " + tries + " times.");
         cleanupFile(fileSys, file1);
       }
     }
@@ -445,8 +441,8 @@
 
       // Decommission one of the datanodes with a replica
       BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
-      assertEquals("Unexpected number of replicas from getFileBlockLocations",
-          replicas, loc.getHosts().length);
+      assertEquals(replicas, loc.getHosts().length,
+          "Unexpected number of replicas from getFileBlockLocations");
       final String toDecomHost = loc.getNames()[0];
       String toDecomUuid = null;
       for (DataNode d : getCluster().getDataNodes()) {
@@ -455,7 +451,7 @@
           break;
         }
       }
-      assertNotNull("Could not find a dn with the block!", toDecomUuid);
+      assertNotNull(toDecomUuid, "Could not find a dn with the block!");
       final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid,
           0, decommissionedNodes, AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
@@ -467,8 +463,8 @@
 
       // Ensure decommissioned datanode is not automatically shutdown
       DFSClient client = getDfsClient(0);
-      assertEquals("All datanodes must be alive", numDatanodes,
-          client.datanodeReport(DatanodeReportType.LIVE).length);
+      assertEquals(numDatanodes, client.datanodeReport(
+          DatanodeReportType.LIVE).length, "All datanodes must be alive");
 
       // wait for the block to be replicated
       final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
@@ -557,7 +553,7 @@
         break;
       }
     }
-    assertNotNull("Could not find decomNode in cluster!", decomNode);
+    assertNotNull(decomNode, "Could not find decomNode in cluster!");
     return decomNode;
   }
 
@@ -603,14 +599,14 @@
         Thread.sleep(HEARTBEAT_INTERVAL * 1000);
         info = client.datanodeReport(DatanodeReportType.LIVE);
       }
-      assertEquals("Number of live nodes should be 0", 0, info.length);
+        assertEquals(0, info.length, "Number of live nodes should be 0");
       
       // Test that bogus hostnames are considered "dead".
       // The dead report should have an entry for the bogus entry in the hosts
       // file.  The original datanode is excluded from the report because it
       // is no longer in the included list.
       info = client.datanodeReport(DatanodeReportType.DEAD);
-      assertEquals("There should be 1 dead node", 1, info.length);
+      assertEquals(1, info.length, "There should be 1 dead node");
       assertEquals(bogusIp, info[0].getHostName());
     }
   }
@@ -926,7 +922,7 @@
           OpenFilesIterator.FILTER_PATH_DEFAULT);
       assertEquals(0, batchedListEntries.size());
     } catch (NullPointerException e) {
-      Assert.fail("Should not throw NPE when the file is not under " +
+      Assertions.fail("Should not throw NPE when the file is not under " +
           "construction but has lease!");
     }
     initExcludeHost("");
@@ -1152,20 +1148,20 @@
     getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
     numDatanodes+=1;
 
-    assertEquals("Number of datanodes should be 2 ", 2,
-        getCluster().getDataNodes().size());
+    assertEquals(2, getCluster().getDataNodes().size(),
+        "Number of datanodes should be 2 ");
     //Restart the namenode
     getCluster().restartNameNode();
     DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
         getCluster().getNamesystem(), excludedDatanodeID);
     waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
 
-    // Ensure decommissioned datanode is not automatically shutdown
-    assertEquals("All datanodes must be alive", numDatanodes, 
-        client.datanodeReport(DatanodeReportType.LIVE).length);
-    assertTrue("Checked if block was replicated after decommission.",
-        checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
-        numDatanodes) == null);
+    // Ensure decommissioned datanode is not automatically shutdown
+    assertEquals(numDatanodes, client.datanodeReport(
+        DatanodeReportType.LIVE).length, "All datanodes must be alive");
+    assertTrue(checkFile(fileSys, file1, replicas,
+        datanodeInfo.getXferAddr(), numDatanodes) == null,
+        "Checked if block was replicated after decommission.");
 
     cleanupFile(fileSys, file1);
     // Restart the cluster and ensure recommissioned datanodes
@@ -1203,10 +1199,10 @@
     //Restart the namenode
     getCluster().restartNameNode();
 
-    assertEquals("There should be one node alive", 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
-    assertEquals("There should be one node dead", 1,
-        client.datanodeReport(DatanodeReportType.DEAD).length);
+    assertEquals(1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "There should be one node alive");
+    assertEquals(1, client.datanodeReport(DatanodeReportType.DEAD).length,
+        "There should be one node dead");
   }
 
   /**
@@ -1223,7 +1219,7 @@
    * It is not recommended to use a registration name which is not also a
    * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
    */
-  @Ignore
+  @Disabled
   @Test(timeout=360000)
   public void testIncludeByRegistrationName() throws Exception {
     // Any IPv4 address starting with 127 functions as a "loopback" address
@@ -1277,8 +1273,8 @@
         try {
           DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
           if (info.length == 1) {
-            Assert.assertFalse(info[0].isDecommissioned());
-            Assert.assertFalse(info[0].isDecommissionInProgress());
+            Assertions.assertFalse(info[0].isDecommissioned());
+            Assertions.assertFalse(info[0].isDecommissionInProgress());
             assertEquals(registrationName, info[0].getHostName());
             return true;
           }
@@ -1334,8 +1330,8 @@
     }
     // Run decom scan and check
     BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
-    assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, 
-        decomManager.getNumNodesChecked());
+    assertEquals(expectedNumCheckedNodes,
+        decomManager.getNumNodesChecked(), "Unexpected # of nodes checked");
     // Recommission all nodes
     for (DatanodeInfo dn : decommissionedNodes) {
       putNodeInService(0, dn);
@@ -1446,10 +1442,10 @@
 
   private void assertTrackedAndPending(DatanodeAdminManager decomManager,
       int tracked, int pending) {
-    assertEquals("Unexpected number of tracked nodes", tracked,
-        decomManager.getNumTrackedNodes());
-    assertEquals("Unexpected number of pending nodes", pending,
-        decomManager.getNumPendingNodes());
+    assertEquals(tracked, decomManager.getNumTrackedNodes(),
+        "Unexpected number of tracked nodes");
+    assertEquals(pending, decomManager.getNumPendingNodes(),
+        "Unexpected number of pending nodes");
   }
 
   /**
@@ -1604,12 +1600,12 @@
     long newTotalCapacity = datanodeStatistics.getCapacityTotal();
     long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
 
-    assertTrue("DfsUsedCapacity should not be the same after a node has " +
-        "been decommissioned!", initialUsedCapacity != newUsedCapacity);
-    assertTrue("TotalCapacity should not be the same after a node has " +
-        "been decommissioned!", initialTotalCapacity != newTotalCapacity);
-    assertTrue("BlockPoolUsed should not be the same after a node has " +
-        "been decommissioned!",initialBlockPoolUsed != newBlockPoolUsed);
+    assertTrue(initialUsedCapacity != newUsedCapacity, "DfsUsedCapacity " +
+        "should not be the same after a node has been decommissioned!");
+    assertTrue(initialTotalCapacity != newTotalCapacity, "TotalCapacity " +
+        "should not be the same after a node has been decommissioned!");
+    assertTrue(initialBlockPoolUsed != newBlockPoolUsed, "BlockPoolUsed " +
+        "should not be the same after a node has been decommissioned!");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
index 9c37a19..99659c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
@@ -22,7 +22,7 @@
     .DatanodeAdminBackoffMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement
     .DatanodeAdminMonitorInterface;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index c68cb17..59c1743 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -63,10 +60,10 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -113,7 +110,7 @@
     return new HdfsConfiguration();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = createConfiguration();
     // Set up the hosts/exclude files.
@@ -162,7 +159,7 @@
         StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     cleanupFile(localFileSys, decommissionDir);
     if (cluster != null) {
@@ -209,7 +206,7 @@
     final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
     int writeBytes = cellSize * dataBlocks * 2;
     writeStripedFile(dfs, ecFile, writeBytes);
-    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+    Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
 
     final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
     LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
@@ -254,7 +251,7 @@
           decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
         } catch (Exception e) {
           LOG.error("Exception while decommissioning", e);
-          Assert.fail("Shouldn't throw exception!");
+          Assertions.fail("Shouldn't throw exception!");
         }
       };
     };
@@ -279,9 +276,9 @@
     assertEquals(liveDecommissioned + decommisionNodes.size(),
         fsn.getNumDecomLiveDataNodes());
 
-    // Ensure decommissioned datanode is not automatically shutdown
-    assertEquals("All datanodes must be alive", numDNs,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    // Ensure decommissioned datanode is not automatically shutdown
+    assertEquals(numDNs, client.datanodeReport(
+        DatanodeReportType.LIVE).length, "All datanodes must be alive");
 
     assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
@@ -301,7 +298,7 @@
     final Path ecFile = new Path(ecDir, "testDecommissionWithBusyNode");
     int writeBytes = cellSize * dataBlocks;
     writeStripedFile(dfs, ecFile, writeBytes);
-    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+    Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
     FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
 
     //2. make once DN busy
@@ -325,9 +322,9 @@
     //4. wait for decommission block to replicate
     Thread.sleep(3000);
     DatanodeStorageInfo[] newDnStorageInfos = bm.getStorages(firstBlock);
-    Assert.assertEquals("Busy DN shouldn't be reconstructed",
-        dnStorageInfos[busyDNIndex].getStorageID(),
-        newDnStorageInfos[busyDNIndex].getStorageID());
+    Assertions.assertEquals(dnStorageInfos[busyDNIndex].getStorageID(),
+        newDnStorageInfos[busyDNIndex].getStorageID(),
+        "Busy DN shouldn't be reconstructed");
 
     //5. check decommission DN block index, it should be reconstructed again
     LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
@@ -340,12 +337,12 @@
       }
     }
 
-    Assert.assertEquals("Decommission DN block should be reconstructed", 2,
-        decommissionBlockIndexCount);
+    Assertions.assertEquals(2, decommissionBlockIndexCount,
+        "Decommission DN block should be reconstructed");
 
     FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
-    Assert.assertTrue("Checksum mismatches!",
-        fileChecksum1.equals(fileChecksum2));
+    Assertions.assertTrue(fileChecksum1.equals(fileChecksum2),
+        "Checksum mismatches!");
   }
 
   /**
@@ -363,7 +360,7 @@
     int writeBytes = cellSize * dataBlocks;
     writeStripedFile(dfs, ecFile, writeBytes);
 
-    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+    Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
     FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
 
     //2. make once DN busy
@@ -398,13 +395,13 @@
 
     //7. Busy DN shouldn't be reconstructed
     DatanodeStorageInfo[] newDnStorageInfos = bm.getStorages(firstBlock);
-    Assert.assertEquals("Busy DN shouldn't be reconstructed",
-        dnStorageInfos[busyDNIndex].getStorageID(),
-        newDnStorageInfos[busyDNIndex].getStorageID());
+    Assertions.assertEquals(dnStorageInfos[busyDNIndex].getStorageID(),
+        newDnStorageInfos[busyDNIndex].getStorageID(),
+        "Busy DN shouldn't be reconstructed");
 
     //8. check the checksum of a file
     FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
-    Assert.assertEquals("Checksum mismatches!", fileChecksum1, fileChecksum2);
+      Assertions.assertEquals(fileChecksum1, fileChecksum2, "Checksum mismatches!");
 
     //9. check the data is correct
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommissionNodes,
@@ -430,7 +427,7 @@
     final Path ecFile = new Path(ecDir, "testFileChecksumAfterDecommission");
     int writeBytes = cellSize * dataBlocks;
     writeStripedFile(dfs, ecFile, writeBytes);
-    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+    Assertions.assertEquals(0, bm.numOfUnderReplicatedBlocks());
     FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
 
     final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
@@ -453,8 +450,8 @@
     LOG.info("fileChecksum1:" + fileChecksum1);
     LOG.info("fileChecksum2:" + fileChecksum2);
 
-    Assert.assertTrue("Checksum mismatches!",
-        fileChecksum1.equals(fileChecksum2));
+    Assertions.assertTrue(fileChecksum1.equals(fileChecksum2),
+        "Checksum mismatches!");
   }
 
   private void testDecommission(int writeBytes, int storageCount,
@@ -484,8 +481,8 @@
 
     // Ensure decommissioned datanode is not automatically shutdown
     DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
-    assertEquals("All datanodes must be alive", numDNs,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDNs, client.datanodeReport(
+        DatanodeReportType.LIVE).length, "All datanodes must be alive");
 
     assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
     StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
@@ -531,10 +528,10 @@
           locToTokenList.get(i);
       DatanodeInfo[] di = lb.getLocations();
       for (int j = 0; j < di.length; j++) {
-        Assert.assertEquals("Block index value mismatches after sorting",
-            (byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
-        Assert.assertEquals("Block token value mismatches after sorting",
-            locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
+        Assertions.assertEquals((byte) locToIndex.get(di[j]), stripedBlk
+            .getBlockIndices()[j], "Block index value mismatches after sorting");
+        Assertions.assertEquals(locToToken.get(di[j]), stripedBlk
+            .getBlockTokens()[j], "Block token value mismatches after sorting");
       }
     }
   }
@@ -620,7 +617,7 @@
           break;
         }
       }
-      assertTrue("Datanode: " + dn + " is not LIVE", nodeExists);
+      assertTrue(nodeExists, "Datanode: " + dn + " is not LIVE");
       excludeNodes.add(dn.getName());
       LOG.info("Decommissioning node: " + dn.getName());
     }
@@ -671,9 +668,9 @@
       List<DatanodeInfo> decommissionedNodes, int numDatanodes)
           throws IOException {
     boolean isNodeDown = decommissionedNodes.size() > 0;
-    // need a raw stream
-    assertTrue("Not HDFS:" + fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    // need a raw stream
+    assertTrue(fileSys instanceof DistributedFileSystem,
+        "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     for (LocatedBlock blk : dinfo) { // for each block
@@ -931,7 +928,7 @@
             decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
           } catch (Exception e) {
             LOG.error("Exception while decommissioning", e);
-            Assert.fail("Shouldn't throw exception!");
+            Assertions.fail("Shouldn't throw exception!");
           }
         }).start();
     decomStarted.await(5, TimeUnit.SECONDS);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
index ad5c2a8..f91e4d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestDeprecatedKeys {
  
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
index 51a28d2..567f897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests disabling client connection caching in a single node
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f7dcaef..3e13295 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -22,12 +22,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
@@ -125,8 +120,8 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.InOrder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -226,13 +221,13 @@
       types.add(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES);
       RemoteIterator<OpenFileEntry> listOpenFiles =
           fileSys.listOpenFiles(EnumSet.copyOf(types));
-      assertTrue("Two files should be open", listOpenFiles.hasNext());
+      assertTrue(listOpenFiles.hasNext(), "Two files should be open");
       int countOpenFiles = 0;
       while (listOpenFiles.hasNext()) {
         listOpenFiles.next();
         ++countOpenFiles;
       }
-      assertEquals("Mismatch of open files count", 2, countOpenFiles);
+      assertEquals(2, countOpenFiles, "Mismatch of open files count");
 
       // create another file, close it, and read it, so
       // the client gets a socket in its SocketCache
@@ -444,7 +439,7 @@
         // success
         threw = true;
       }
-      assertTrue("Failed to throw IOE when seeking past end", threw);
+        assertTrue(threw, "Failed to throw IOE when seeking past end");
       input.close();
       threw = false;
       try {
@@ -453,7 +448,7 @@
         //success
         threw = true;
       }
-      assertTrue("Failed to throw IOE when seeking after close", threw);
+        assertTrue(threw, "Failed to throw IOE when seeking after close");
       fileSys.close();
     }
     finally {
@@ -562,7 +557,7 @@
         // Check to see if opening a non-existent file triggers a FNF
         FileSystem fs = cluster.getFileSystem();
         Path dir = new Path("/wrwelkj");
-        assertFalse("File should not exist for test.", fs.exists(dir));
+        assertFalse(fs.exists(dir), "File should not exist for test.");
 
         try {
           FSDataInputStream in = fs.open(dir);
@@ -570,8 +565,8 @@
             in.close();
             fs.close();
           } finally {
-            assertTrue("Did not get a FileNotFoundException for non-existing" +
-                " file.", false);
+            assertTrue(false, "Did not get a FileNotFoundException for " +
+                "non-existing file.");
           }
         } catch (FileNotFoundException fnf) {
           // This is the proper exception to catch; move on.
@@ -621,11 +616,11 @@
         fs.create(new Path("/tmp/nonEmptyDir/emptyFile")).close();
         try {
           fs.delete(new Path("/tmp/nonEmptyDir"), false);
-          Assert.fail("Expecting PathIsNotEmptyDirectoryException");
+          Assertions.fail("Expecting PathIsNotEmptyDirectoryException");
         } catch (PathIsNotEmptyDirectoryException ex) {
           // This is the proper exception to catch; move on.
         }
-        Assert.assertTrue(fs.exists(new Path("/test/nonEmptyDir")));
+        Assertions.assertTrue(fs.exists(new Path("/test/nonEmptyDir")));
         fs.delete(new Path("/tmp/nonEmptyDir"), true);
       }
 
@@ -1055,8 +1050,8 @@
       // wait until all threads are done
       allDone.await();
 
-     assertNull("Child failed with exception " + childError.get(),
-          childError.get());
+      assertNull(childError.get(),
+          "Child failed with exception " + childError.get());
 
       checkStatistics(fs, 0, numThreads, 0);
       // check the single operation count stat
@@ -1068,8 +1063,8 @@
            opCountIter.hasNext();) {
         final LongStatistic opCount = opCountIter.next();
         if (OpType.MKDIRS.getSymbol().equals(opCount.getName())) {
-          assertEquals("Unexpected op count from iterator!",
-              numThreads + oldMkdirOpCount, opCount.getValue());
+          assertEquals(numThreads + oldMkdirOpCount, opCount.getValue(),
+              "Unexpected op count from iterator!");
         }
         LOG.info(opCount.getName() + "\t" + opCount.getValue());
       }
@@ -1187,8 +1182,8 @@
   }
 
   public static void checkOpStatistics(OpType op, long count) {
-    assertEquals("Op " + op.getSymbol() + " has unexpected count!",
-        count, getOpStatistics(op));
+    assertEquals(count, getOpStatistics(op),
+        "Op " + op.getSymbol() + " has unexpected count!");
   }
 
   public static long getOpStatistics(OpType op) {
@@ -1219,8 +1214,8 @@
           "/test/TestNonExistingFile"));
       fail("Expecting FileNotFoundException");
     } catch (FileNotFoundException e) {
-      assertTrue("Not throwing the intended exception message", e.getMessage()
-          .contains("File does not exist: /test/TestNonExistingFile"));
+      assertTrue(e.getMessage().contains("File does not exist: /test/" +
+          "TestNonExistingFile"), "Not throwing the intended exception message");
     }
 
     try {
@@ -1229,8 +1224,8 @@
       hdfs.getFileChecksum(path);
       fail("Expecting FileNotFoundException");
     } catch (FileNotFoundException e) {
-      assertTrue("Not throwing the intended exception message", e.getMessage()
-          .contains("Path is not a file: /test/TestExistingDir"));
+      assertTrue(e.getMessage().contains("Path is not a file: /test/" +
+          "TestExistingDir"), "Not throwing the intended exception message");
     }
 
     //webhdfs
@@ -1383,10 +1378,10 @@
       DFSTestUtil.waitForReplication(fs, testFile, (short) repl, 30000);
       // Get the listing
       RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(testFile);
-      assertTrue("Expected file to be present", it.hasNext());
+      assertTrue(it.hasNext(), "Expected file to be present");
       LocatedFileStatus stat = it.next();
       BlockLocation[] locs = stat.getBlockLocations();
-      assertEquals("Unexpected number of locations", numBlocks, locs.length);
+      assertEquals(numBlocks, locs.length, "Unexpected number of locations");
 
       Set<String> dnStorageIds = new HashSet<>();
       for (DataNode d : cluster.getDataNodes()) {
@@ -1403,15 +1398,15 @@
         // Run it through a set to deduplicate, since there should be no dupes
         Set<String> storageIds = new HashSet<>();
         Collections.addAll(storageIds, ids);
-        assertEquals("Unexpected num storage ids", repl, storageIds.size());
-        // Make sure these are all valid storage IDs
-        assertTrue("Unknown storage IDs found!", dnStorageIds.containsAll
-            (storageIds));
+        assertEquals(repl, storageIds.size(), "Unexpected num storage ids");
+        // Make sure these are all valid storage IDs
+        assertTrue(dnStorageIds.containsAll(storageIds),
+            "Unknown storage IDs found!");
         // Check storage types are the default, since we didn't set any
         StorageType[] types = loc.getStorageTypes();
-        assertEquals("Unexpected num storage types", repl, types.length);
+        assertEquals(repl, types.length, "Unexpected num storage types");
         for (StorageType t: types) {
-          assertEquals("Unexpected storage type", StorageType.DEFAULT, t);
+          assertEquals(StorageType.DEFAULT, t, "Unexpected storage type");
         }
       }
     } finally {
@@ -1489,9 +1484,9 @@
       // write to file
       output.writeBytes("Some test data");
       output.flush();
-      assertFalse("File status should be open", fs.isFileClosed(file));
+      assertFalse(fs.isFileClosed(file), "File status should be open");
       output.close();
-      assertTrue("File status should be closed", fs.isFileClosed(file));
+      assertTrue(fs.isFileClosed(file), "File status should be closed");
     } finally {
       cluster.shutdown();
     }
@@ -1596,7 +1591,7 @@
       long start = Time.now();
       try {
         peer.getInputStream().read();
-        Assert.fail("read should timeout");
+        Assertions.fail("read should timeout");
       } catch (SocketTimeoutException ste) {
         long delta = Time.now() - start;
         if (delta < timeout*0.9) {
@@ -1647,7 +1642,7 @@
         byte[] buf = new byte[10 * 1024 * 1024];
         peer.getOutputStream().write(buf);
         long delta = Time.now() - start;
-        Assert.fail("write finish in " + delta + " ms" + "but should timedout");
+        Assertions.fail("write finish in " + delta + " ms" + "but should timedout");
       } catch (SocketTimeoutException ste) {
         long delta = Time.now() - start;
 
@@ -1805,15 +1800,15 @@
       } catch (FileNotFoundException e) {
         // As expected.
       }
-      assertFalse("parent directory should not be created",
-          fs.exists(new Path("/parent")));
+      assertFalse(fs.exists(new Path("/parent")),
+          "parent directory should not be created");
 
       try (FSDataOutputStream out = fs.createFile(nonParentFile).recursive()
         .build()) {
         out.write(1);
       }
-      assertTrue("parent directory has not been created",
-          fs.exists(new Path("/parent")));
+      assertTrue(fs.exists(new Path("/parent")),
+          "parent directory has not been created");
     }
   }
 
@@ -2007,7 +2002,7 @@
       //test enable a policy that doesn't exist
       try {
         fs.enableErasureCodingPolicy("notExistECName");
-        Assert.fail("enable the policy that doesn't exist should fail");
+        Assertions.fail("enable the policy that doesn't exist should fail");
       } catch (Exception e) {
         GenericTestUtils.assertExceptionContains("does not exist", e);
         // pass
@@ -2016,7 +2011,7 @@
       //test disable a policy that doesn't exist
       try {
         fs.disableErasureCodingPolicy("notExistECName");
-        Assert.fail("disable the policy that doesn't exist should fail");
+        Assertions.fail("disable the policy that doesn't exist should fail");
       } catch (Exception e) {
         GenericTestUtils.assertExceptionContains("does not exist", e);
         // pass
@@ -2078,7 +2073,7 @@
           .getBlockLocations(file1.toUri().getPath(), 0, Long.MAX_VALUE);
       int numSSD = Collections.frequency(
           Arrays.asList(locations[0].getStorageTypes()), StorageType.SSD);
-      assertEquals("Number of SSD should be 1 but was : " + numSSD, 1, numSSD);
+      assertEquals(1, numSSD, "Number of SSD should be 1 but was : " + numSSD);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
index 1a2c4de..265dd1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
@@ -28,10 +28,10 @@
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
@@ -39,9 +39,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Testing correctness of FileSystem.getFileBlockLocations and
@@ -69,7 +67,7 @@
   @Rule
   public final Timeout globalTimeout = new Timeout(60000 * 3);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     ecPolicy = getEcPolicy();
     cellSize = ecPolicy.getCellSize();
@@ -92,7 +90,7 @@
         ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index f9336fc..e7ef41f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.times;
 
 import java.io.IOException;
@@ -53,11 +50,11 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
-import org.junit.Assert;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -118,12 +115,12 @@
     this.resolverClazz = resolverClazz;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new Configuration();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -365,11 +362,11 @@
     LOG.info("The encryption key is invalid on all nodes now.");
     fs.getFileChecksum(TEST_PATH);
     // verify that InvalidEncryptionKeyException is handled properly
-    Assert.assertTrue(client.getEncryptionKey() == null);
+    Assertions.assertTrue(client.getEncryptionKey() == null);
     Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
     // Retry the operation after clearing the encryption key
     FileChecksum verifyChecksum = fs.getFileChecksum(TEST_PATH);
-    Assert.assertEquals(checksum, verifyChecksum);
+    Assertions.assertEquals(checksum, verifyChecksum);
   }
 
   @Test
@@ -428,8 +425,8 @@
       // write data to induce pipeline recovery
       out.write(PLAIN_TEXT.getBytes());
       out.hflush();
-      assertFalse("The first datanode in the pipeline was not replaced.",
-          Arrays.asList(dfstream.getPipeline()).contains(targets[0]));
+      assertFalse(Arrays.asList(dfstream.getPipeline()).contains(targets[0]),
+          "The first datanode in the pipeline was not replaced.");
     }
     // verify that InvalidEncryptionKeyException is handled properly
     Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 0775e04..78250fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -17,6 +17,81 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSTestWrapper;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.hdfs.tools.CryptoAdmin;
+import org.apache.hadoop.hdfs.tools.DFSck;
+import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Lists;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Rule;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.rules.Timeout;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+import org.xml.sax.InputSource;
+import org.xml.sax.helpers.DefaultHandler;
+
+import javax.xml.parsers.SAXParser;
+import javax.xml.parsers.SAXParserFactory;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
@@ -43,83 +118,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoInputStream;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderFactory;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSTestWrapper;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileContextTestWrapper;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.fs.FileSystemTestWrapper;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
-import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
-import org.apache.hadoop.hdfs.tools.CryptoAdmin;
-import org.apache.hadoop.hdfs.tools.DFSck;
-import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
-import org.apache.hadoop.hdfs.web.WebHdfsConstants;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.DelegationTokenIssuer;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.Lists;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-
 import static org.apache.hadoop.fs.CommonConfigurationKeys.DFS_CLIENT_IGNORE_NAMENODE_DEFAULT_KMS_URI;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyShort;
-import static org.mockito.Mockito.withSettings;
-import static org.mockito.Mockito.anyString;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
@@ -138,21 +137,19 @@
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-import org.xml.sax.InputSource;
-import org.xml.sax.helpers.DefaultHandler;
-
-import javax.xml.parsers.SAXParser;
-import javax.xml.parsers.SAXParserFactory;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyShort;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.withSettings;
 
 public class TestEncryptionZones {
   static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
@@ -184,7 +181,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(120 * 1000);
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     fsHelper = new FileSystemTestHelper();
@@ -218,7 +215,7 @@
         .getProvider());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -234,7 +231,7 @@
       count++;
       it.next();
     }
-    assertEquals("Unexpected number of encryption zones!", numZones, count);
+    assertEquals(numZones, count, "Unexpected number of encryption zones!");
   }
 
   /**
@@ -261,9 +258,9 @@
         break;
       }
     }
-    assertTrue("Did not find expected encryption zone with keyName " + keyName +
-            " path " + path, match
-    );
+    assertTrue(match,
+        "Did not find expected encryption zone with keyName " + keyName +
+        " path " + path);
   }
 
   /**
@@ -298,7 +295,7 @@
     final FsShell shell = new FsShell(clientConf);
     String[] argv = new String[]{"-rm", ezfile1.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("Can't remove a file in EZ as superuser", 0, res);
+      assertEquals(0, res, "Can't remove a file in EZ as superuser");
 
     final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
     assertTrue(fsWrapper.exists(trashDir));
@@ -323,7 +320,7 @@
         // /zones/zone1/.Trash/user/Current/zones/zone1/file2
         String[] argv = new String[]{"-rm", ezfile2.toString()};
         int res = ToolRunner.run(shell, argv);
-        assertEquals("Can't remove a file in EZ as user:mygroup", 0, res);
+        assertEquals(0, res, "Can't remove a file in EZ as user:mygroup");
         return null;
       }
     });
@@ -370,23 +367,23 @@
           fail("Exception should be thrown while setting: " +
                   xattrName + " on file:" + raw2File);
         } catch (RemoteException e) {
-          Assert.assertEquals(e.getClassName(),
+          Assertions.assertEquals(e.getClassName(),
                   IllegalArgumentException.class.getCanonicalName());
-          Assert.assertTrue(e.getMessage().
+          Assertions.assertTrue(e.getMessage().
                   contains("does not belong to the key"));
         }
       }
     }
 
-    assertEquals("File can be created on the root encryption zone " +
-            "with correct length", len, fs.getFileStatus(zone1File).getLen());
-    assertTrue("/zone1 dir is encrypted",
-            fs.getFileStatus(zone1).isEncrypted());
-    assertTrue("File is encrypted", fs.getFileStatus(zone1File).isEncrypted());
+    assertEquals(len, fs.getFileStatus(zone1File).getLen(),
+        "File can be created on the root encryption zone " +
+        "with correct length");
+    assertTrue(fs.getFileStatus(zone1).isEncrypted(), "/zone1 dir is encrypted");
+    assertTrue(fs.getFileStatus(zone1File).isEncrypted(), "File is encrypted");
 
-    assertTrue("/zone2 dir is encrypted",
-            fs.getFileStatus(zone2).isEncrypted());
-    assertTrue("File is encrypted", fs.getFileStatus(zone2File).isEncrypted());
+    assertTrue(fs.getFileStatus(zone2).isEncrypted(),
+        "/zone2 dir is encrypted");
+    assertTrue(fs.getFileStatus(zone2File).isEncrypted(), "File is encrypted");
 
     // 4. Now the decrypted contents of the files should be different.
     DFSTestUtil.verifyFilesNotEqual(fs, zone1File, zone2File, len);
@@ -418,7 +415,7 @@
     final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
     String[] argv = new String[]{"-rmdir", trashDir.toUri().getPath()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("Unable to delete trash directory.", 0, res);
+      assertEquals(0, res, "Unable to delete trash directory.");
     assertFalse(fsWrapper.exists(trashDir));
 
     // execute -provisionTrash command option and make sure the trash
@@ -436,11 +433,11 @@
   @Test
   public void testBasicOperations() throws Exception {
 
-    assertNotNull("key provider is not present", dfsAdmin.getKeyProvider());
+    assertNotNull(dfsAdmin.getKeyProvider(), "key provider is not present");
     int numZones = 0;
-    /* Number of EZs should be 0 if no EZ is created */
-    assertEquals("Unexpected number of encryption zones!", numZones,
-        cluster.getNamesystem().getNumEncryptionZones());
+    /* Number of EZs should be 0 if no EZ is created */
+    assertEquals(numZones, cluster.getNamesystem().getNumEncryptionZones(),
+        "Unexpected number of encryption zones!");
     /* Test failure of create EZ on a directory that doesn't exist. */
     final Path zoneParent = new Path("/zones");
     final Path zone1 = new Path(zoneParent, "zone1");
@@ -577,8 +574,8 @@
     fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     cluster.restartNameNode(true);
     assertNumZones(numZones);
-    assertEquals("Unexpected number of encryption zones!", numZones, cluster
-        .getNamesystem().getNumEncryptionZones());
+    assertEquals(numZones, cluster.getNamesystem().getNumEncryptionZones(),
+        "Unexpected number of encryption zones!");
     assertGauge("NumEncryptionZones", numZones, getMetrics(NS_METRICS));
     assertZonePresent(null, zone1.toString());
 
@@ -635,14 +632,14 @@
     assertZonePresent(TEST_KEY, zone1.toString());
     // Check that zone1 contains a .Trash directory
     final Path zone1Trash = new Path(zone1, fs.TRASH_PREFIX);
-    assertTrue("CreateEncryptionZone with trash enabled should create a " +
-        ".Trash directory in the EZ", fs.exists(zone1Trash));
+    assertTrue(fs.exists(zone1Trash), "CreateEncryptionZone with trash "
+        + "enabled should create a .Trash directory in the EZ");
 
     // getEncryptionZoneForPath for FQP should return the path component
     EncryptionZone ezForZone1 = dfsAdmin.getEncryptionZoneForPath(zone1FQP);
-    assertTrue("getEncryptionZoneForPath for fully qualified path should " +
-        "return the path component",
-        ezForZone1.getPath().equals(zone1.toString()));
+    assertTrue(ezForZone1.getPath().equals(zone1.toString()),
+        "getEncryptionZoneForPath for fully qualified path should "
+            + "return the path component");
 
     // Create EZ without Trash
     fsWrapper.mkdir(zone2FQP, FsPermission.getDirDefault(), true);
@@ -655,8 +652,9 @@
     EncryptionZone ezForZone2 = dfsAdmin.getEncryptionZoneForPath(zone2FQP);
     Path ezTrashForZone2 = new Path(ezForZone2.getPath(),
         FileSystem.TRASH_PREFIX);
-    assertTrue("provisionEZTrash with fully qualified path should create " +
-        "trash directory ", fsWrapper.exists(ezTrashForZone2));
+    assertTrue(fsWrapper.exists(ezTrashForZone2),
+        "provisionEZTrash with fully qualified path should create "
+            + "trash directory ");
   }
 
   /**
@@ -741,13 +739,13 @@
            */
         }
 
-        // Check operation with accessible paths
-        assertEquals("expected ez path", allPath.toString(),
-            userAdmin.getEncryptionZoneForPath(allPath).getPath().
-            toString());
-        assertEquals("expected ez path", allPath.toString(),
-            userAdmin.getEncryptionZoneForPath(allPathFile).getPath().
-            toString());
+        // Check operation with accessible paths
+        assertEquals(allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(allPath).getPath().toString(),
+            "expected ez path");
+        assertEquals(allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(allPathFile).getPath()
+                .toString(), "expected ez path");
 
         // Check operation with inaccessible (lack of permissions) path
         try {
@@ -757,39 +755,39 @@
           assertExceptionContains("Permission denied:", e);
         }
 
-        assertNull("expected null for nonexistent path",
-            userAdmin.getEncryptionZoneForPath(nonexistent));
+        assertNull(userAdmin.getEncryptionZoneForPath(nonexistent),
+            "expected null for nonexistent path");
 
-        // Check operation with non-ez paths
-        assertNull("expected null for non-ez path",
-            userAdmin.getEncryptionZoneForPath(nonEZDir));
-        assertNull("expected null for non-ez path",
-            userAdmin.getEncryptionZoneForPath(nonEZFile));
+        // Check operation with non-ez paths
+        assertNull(userAdmin.getEncryptionZoneForPath(nonEZDir),
+            "expected null for non-ez path");
+        assertNull(userAdmin.getEncryptionZoneForPath(nonEZFile),
+            "expected null for non-ez path");
 
         // Check operation with snapshots
         String snapshottedAllPath = newSnap.toString() + allPath.toString();
-        assertEquals("expected ez path", allPath.toString(),
-            userAdmin.getEncryptionZoneForPath(
-                new Path(snapshottedAllPath)).getPath().toString());
+        assertEquals(allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath))
+                .getPath().toString(), "expected ez path");
 
         /*
          * Delete the file from the non-snapshot and test that it is still ok
          * in the ez.
          */
         fs.delete(allPathFile, false);
-        assertEquals("expected ez path", allPath.toString(),
-            userAdmin.getEncryptionZoneForPath(
-                new Path(snapshottedAllPath)).getPath().toString());
+        assertEquals(allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath))
+                .getPath().toString(), "expected ez path");
 
         // Delete the ez and make sure ss's ez is still ok.
         fs.delete(allPath, true);
-        assertEquals("expected ez path", allPath.toString(),
-            userAdmin.getEncryptionZoneForPath(
-                new Path(snapshottedAllPath)).getPath().toString());
-        assertNull("expected null for deleted file path",
-            userAdmin.getEncryptionZoneForPath(allPathFile));
-        assertNull("expected null for deleted directory path",
-            userAdmin.getEncryptionZoneForPath(allPath));
+        assertEquals(allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath))
+                .getPath().toString(), "expected ez path");
+        assertNull(userAdmin.getEncryptionZoneForPath(allPathFile),
+            "expected null for deleted file path");
+        assertNull(userAdmin.getEncryptionZoneForPath(allPath),
+            "expected null for deleted directory path");
         return null;
       }
     });
@@ -821,16 +819,16 @@
 
     // Verify that we can rename dir and files within an encryption zone.
     assertTrue(fs.rename(pathFooBaz, pathFooBar));
-    assertTrue("Rename of dir and file within ez failed",
-        !wrapper.exists(pathFooBaz) && wrapper.exists(pathFooBar));
-    assertEquals("Renamed file contents not the same",
-        contents, DFSTestUtil.readFile(fs, pathFooBarFile));
+    assertTrue(!wrapper.exists(pathFooBaz) && wrapper.exists(pathFooBar),
+        "Rename of dir and file within ez failed");
+    assertEquals(contents, DFSTestUtil.readFile(fs, pathFooBarFile),
+        "Renamed file contents not the same");
 
     // Verify that we can rename an EZ root
     final Path newFoo = new Path(testRoot, "newfoo");
-    assertTrue("Rename of EZ root", fs.rename(pathFoo, newFoo));
-    assertTrue("Rename of EZ root failed",
-        !wrapper.exists(pathFoo) && wrapper.exists(newFoo));
+    assertTrue(fs.rename(pathFoo, newFoo), "Rename of EZ root");
+    assertTrue(!wrapper.exists(pathFoo) && wrapper.exists(newFoo),
+        "Rename of EZ root failed");
 
     // Verify that we can't rename an EZ root onto itself
     try {
@@ -884,11 +882,11 @@
     // FEInfos should be different
     FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
     FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
-    assertFalse("EDEKs should be different", Arrays
-        .equals(feInfo1.getEncryptedDataEncryptionKey(),
-            feInfo2.getEncryptedDataEncryptionKey()));
-    assertNotEquals("Key was rolled, versions should be different",
-        feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
+    assertFalse(Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(),
+        feInfo2.getEncryptedDataEncryptionKey()), "EDEKs should be different");
+    assertNotEquals(feInfo1.getEzKeyVersionName(),
+        feInfo2.getEzKeyVersionName(),
+        "Key was rolled, versions should be different");
     // Contents still equal
     verifyFilesEqual(fs, encFile1, encFile2, len);
   }
@@ -991,13 +989,13 @@
         CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH)),
         conf);
     List<String> keys = provider.getKeys();
-    assertEquals("Expected NN to have created one key per zone", 1,
-        keys.size());
+    assertEquals(1, keys.size(),
+        "Expected NN to have created one key per zone");
     List<KeyProvider.KeyVersion> allVersions = Lists.newArrayList();
     for (String key : keys) {
       List<KeyProvider.KeyVersion> versions = provider.getKeyVersions(key);
-      assertEquals("Should only have one key version per key", 1,
-          versions.size());
+      assertEquals(1, versions.size(),
+          "Should only have one key version per key");
       allVersions.addAll(versions);
     }
     // Check that the specified CipherSuite was correctly saved on the NN
@@ -1123,8 +1121,8 @@
     final Path baseFile = new Path(prefix, "base");
     fsWrapper.createFile(baseFile);
     FileStatus stat = fsWrapper.getFileStatus(baseFile);
-    assertFalse("Expected isEncrypted to return false for " + baseFile,
-        stat.isEncrypted());
+    assertFalse(stat.isEncrypted(),
+        "Expected isEncrypted to return false for " + baseFile);
 
     // Create an encrypted file to check isEncrypted returns true
     final Path zone = new Path(prefix, "zone");
@@ -1133,57 +1131,57 @@
     final Path encFile = new Path(zone, "encfile");
     fsWrapper.createFile(encFile);
     stat = fsWrapper.getFileStatus(encFile);
-    assertTrue("Expected isEncrypted to return true for enc file" + encFile,
-        stat.isEncrypted());
+    assertTrue(stat.isEncrypted(),
+        "Expected isEncrypted to return true for enc file" + encFile);
 
     // check that it returns true for an ez root
     stat = fsWrapper.getFileStatus(zone);
-    assertTrue("Expected isEncrypted to return true for ezroot",
-        stat.isEncrypted());
+    assertTrue(stat.isEncrypted(),
+        "Expected isEncrypted to return true for ezroot");
 
     // check that it returns true for a dir in the ez
     final Path zoneSubdir = new Path(zone, "subdir");
     fsWrapper.mkdir(zoneSubdir, FsPermission.getDirDefault(), true);
     stat = fsWrapper.getFileStatus(zoneSubdir);
-    assertTrue(
-        "Expected isEncrypted to return true for ez subdir " + zoneSubdir,
-        stat.isEncrypted());
+    assertTrue(
+        stat.isEncrypted(),
+        "Expected isEncrypted to return true for ez subdir " + zoneSubdir);
 
     // check that it returns false for a non ez dir
     final Path nonEzDirPath = new Path(prefix, "nonzone");
     fsWrapper.mkdir(nonEzDirPath, FsPermission.getDirDefault(), true);
     stat = fsWrapper.getFileStatus(nonEzDirPath);
-    assertFalse(
-        "Expected isEncrypted to return false for directory " + nonEzDirPath,
-        stat.isEncrypted());
+    assertFalse(
+        stat.isEncrypted(),
+        "Expected isEncrypted to return false for directory " + nonEzDirPath);
 
     // check that it returns true for listings within an ez
     FileStatus[] statuses = fsWrapper.listStatus(zone);
     for (FileStatus s : statuses) {
-      assertTrue("Expected isEncrypted to return true for ez stat " + zone,
-          s.isEncrypted());
+      assertTrue(s.isEncrypted(),
+          "Expected isEncrypted to return true for ez stat " + zone);
     }
 
     statuses = fsWrapper.listStatus(encFile);
     for (FileStatus s : statuses) {
-      assertTrue(
-          "Expected isEncrypted to return true for ez file stat " + encFile,
-          s.isEncrypted());
+      assertTrue(
+          s.isEncrypted(),
+          "Expected isEncrypted to return true for ez file stat " + encFile);
     }
 
     // check that it returns false for listings outside an ez
     statuses = fsWrapper.listStatus(nonEzDirPath);
     for (FileStatus s : statuses) {
-      assertFalse(
-          "Expected isEncrypted to return false for nonez stat " + nonEzDirPath,
-          s.isEncrypted());
+      assertFalse(
+          s.isEncrypted(),
+          "Expected isEncrypted to return false for nonez stat " + nonEzDirPath);
     }
 
     statuses = fsWrapper.listStatus(baseFile);
     for (FileStatus s : statuses) {
-      assertFalse(
-          "Expected isEncrypted to return false for non ez stat " + baseFile,
-          s.isEncrypted());
+      assertFalse(
+          s.isEncrypted(),
+          "Expected isEncrypted to return false for non ez stat " + baseFile);
     }
   }
 
@@ -1314,8 +1312,8 @@
     executor.submit(new InjectFaultTask() {
       @Override
       public void doCleanup() throws Exception {
-        assertEquals("Expected no startFile key generation",
-            -1, injector.generateCount);
+        assertEquals(-1, injector.generateCount,
+            "Expected no startFile key generation");
         fsWrapper.delete(file, false);
       }
     }).get();
@@ -1331,7 +1329,7 @@
       }
       @Override
       public void doCleanup() throws Exception {
-        assertEquals("Expected no startFile retries", 1, injector.generateCount);
+        assertEquals(1, injector.generateCount, "Expected no startFile retries");
         fsWrapper.delete(file, false);
       }
     }).get();
@@ -1353,7 +1351,7 @@
       }
       @Override
       public void doCleanup() throws Exception {
-        assertEquals("Expected a startFile retry", 2, injector.generateCount);
+        assertEquals(2, injector.generateCount, "Expected a startFile retry");
         fsWrapper.delete(zone1, true);
       }
     }).get();
@@ -1422,9 +1420,9 @@
     Credentials creds = new Credentials();
     final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
     LOG.debug("Delegation tokens: " + Arrays.asList(tokens));
-    Assert.assertEquals(2, tokens.length);
-    Assert.assertEquals(tokens[1], testToken);
-    Assert.assertEquals(2, creds.numberOfTokens());
+    Assertions.assertEquals(2, tokens.length);
+    Assertions.assertEquals(tokens[1], testToken);
+    Assertions.assertEquals(2, creds.numberOfTokens());
   }
 
   /**
@@ -1443,18 +1441,18 @@
     PrintStream out = new PrintStream(bStream, true);
     int errCode = ToolRunner.run(new DFSck(conf, out),
         new String[]{ "/" });
-    assertEquals("Fsck ran with non-zero error code", 0, errCode);
+    assertEquals(0, errCode, "Fsck ran with non-zero error code");
     String result = bStream.toString();
-    assertTrue("Fsck did not return HEALTHY status",
-        result.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(result.contains(NamenodeFsck.HEALTHY_STATUS),
+        "Fsck did not return HEALTHY status");
 
     // Run fsck directly on the encryption zone instead of root
     errCode = ToolRunner.run(new DFSck(conf, out),
         new String[]{ zoneParent.toString() });
-    assertEquals("Fsck ran with non-zero error code", 0, errCode);
+    assertEquals(0, errCode, "Fsck ran with non-zero error code");
     result = bStream.toString();
-    assertTrue("Fsck did not return HEALTHY status",
-        result.contains(NamenodeFsck.HEALTHY_STATUS));
+    assertTrue(result.contains(NamenodeFsck.HEALTHY_STATUS),
+        "Fsck did not return HEALTHY status");
   }
 
   /**
@@ -1477,8 +1475,8 @@
     String contents = DFSTestUtil.readFile(fs, zoneFile);
     final Path snap1 = fs.createSnapshot(zoneParent, "snap1");
     final Path snap1Zone = new Path(snap1, zone.getName());
-    assertEquals("Got unexpected ez path", zone.toString(),
-        dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
+    assertEquals(zone.toString(), dfsAdmin.getEncryptionZoneForPath(snap1Zone)
+        .getPath().toString(), "Got unexpected ez path");
 
     // Now delete the encryption zone, recreate the dir, and take another
     // snapshot
@@ -1486,34 +1484,34 @@
     fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
     final Path snap2 = fs.createSnapshot(zoneParent, "snap2");
     final Path snap2Zone = new Path(snap2, zone.getName());
-    assertEquals("Got unexpected ez path", zone.toString(),
-        dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
-    assertNull("Expected null ez path",
-        dfsAdmin.getEncryptionZoneForPath(snap2Zone));
+    assertEquals(zone.toString(), dfsAdmin.getEncryptionZoneForPath(snap1Zone)
+        .getPath().toString(), "Got unexpected ez path");
+    assertNull(dfsAdmin.getEncryptionZoneForPath(snap2Zone),
+        "Expected null ez path");
 
     // Create the encryption zone again, and that shouldn't affect old snapshot
     dfsAdmin.createEncryptionZone(zone, TEST_KEY2, NO_TRASH);
     EncryptionZone ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
-    assertEquals("Got unexpected ez path", zone.toString(),
-        ezSnap1.getPath().toString());
-    assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());
-    assertNull("Expected null ez path",
-        dfsAdmin.getEncryptionZoneForPath(snap2Zone));
+    assertEquals(zone.toString(), ezSnap1.getPath().toString(),
+        "Got unexpected ez path");
+    assertEquals(TEST_KEY, ezSnap1.getKeyName(), "Unexpected ez key");
+    assertNull(dfsAdmin.getEncryptionZoneForPath(snap2Zone),
+        "Expected null ez path");
 
     final Path snap3 = fs.createSnapshot(zoneParent, "snap3");
     final Path snap3Zone = new Path(snap3, zone.getName());
     // Check that snap3's EZ has the correct settings
     EncryptionZone ezSnap3 = dfsAdmin.getEncryptionZoneForPath(snap3Zone);
-    assertEquals("Got unexpected ez path", zone.toString(),
-        ezSnap3.getPath().toString());
-    assertEquals("Unexpected ez key", TEST_KEY2, ezSnap3.getKeyName());
+    assertEquals(zone.toString(), ezSnap3.getPath().toString(),
+        "Got unexpected ez path");
+    assertEquals(TEST_KEY2, ezSnap3.getKeyName(), "Unexpected ez key");
     // Check that older snapshots still have the old EZ settings
     ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
-    assertEquals("Got unexpected ez path", zone.toString(),
-        ezSnap1.getPath().toString());
-    assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());
-    assertNull("Expected null ez path",
-        dfsAdmin.getEncryptionZoneForPath(snap2Zone));
+    assertEquals(zone.toString(), ezSnap1.getPath().toString(),
+        "Got unexpected ez path");
+    assertEquals(TEST_KEY, ezSnap1.getKeyName(), "Unexpected ez key");
+    assertNull(dfsAdmin.getEncryptionZoneForPath(snap2Zone),
+        "Expected null ez path");
 
     // Check that listEZs only shows the current filesystem state
     ArrayList<EncryptionZone> listZones = Lists.newArrayList();
@@ -1524,29 +1522,29 @@
     for (EncryptionZone z: listZones) {
       System.out.println(z);
     }
-    assertEquals("Did not expect additional encryption zones!", 1,
-        listZones.size());
+    assertEquals(1, listZones.size(),
+        "Did not expect additional encryption zones!");
     EncryptionZone listZone = listZones.get(0);
-    assertEquals("Got unexpected ez path", zone.toString(),
-        listZone.getPath().toString());
-    assertEquals("Unexpected ez key", TEST_KEY2, listZone.getKeyName());
+    assertEquals(zone.toString(), listZone.getPath().toString(),
+        "Got unexpected ez path");
+    assertEquals(TEST_KEY2, listZone.getKeyName(), "Unexpected ez key");
 
     // Verify contents of the snapshotted file
     final Path snapshottedZoneFile = new Path(
         snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName());
-    assertEquals("Contents of snapshotted file have changed unexpectedly",
-        contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
+    assertEquals(contents, DFSTestUtil.readFile(fs, snapshottedZoneFile),
+        "Contents of snapshotted file have changed unexpectedly");
 
     // Now delete the snapshots out of order and verify the zones are still
     // correct
     fs.deleteSnapshot(zoneParent, snap2.getName());
-    assertEquals("Got unexpected ez path", zone.toString(),
-        dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
-    assertEquals("Got unexpected ez path", zone.toString(),
-        dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
+    assertEquals(zone.toString(), dfsAdmin.getEncryptionZoneForPath(snap1Zone)
+        .getPath().toString(), "Got unexpected ez path");
+    assertEquals(zone.toString(), dfsAdmin.getEncryptionZoneForPath(snap3Zone)
+        .getPath().toString(), "Got unexpected ez path");
     fs.deleteSnapshot(zoneParent, snap1.getName());
-    assertEquals("Got unexpected ez path", zone.toString(),
-        dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
+    assertEquals(zone.toString(), dfsAdmin.getEncryptionZoneForPath(snap3Zone)
+        .getPath().toString(), "Got unexpected ez path");
   }
 
   /**
@@ -1570,16 +1568,16 @@
     // Now delete the file and create encryption zone
     fsWrapper.delete(zoneFile, false);
     dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
-    assertEquals("Got unexpected ez path", zone.toString(),
-        dfsAdmin.getEncryptionZoneForPath(zone).getPath());
+    assertEquals(zone.toString(), dfsAdmin.getEncryptionZoneForPath(zone)
+        .getPath(), "Got unexpected ez path");
 
     // The file in snapshot shouldn't have any encryption info
     final Path snapshottedZoneFile = new Path(
         snap1 + "/" + zone.getName() + "/" + zoneFile.getName());
     FileEncryptionInfo feInfo = getFileEncryptionInfo(snapshottedZoneFile);
-    assertNull("Expected null ez info", feInfo);
-    assertEquals("Contents of snapshotted file have changed unexpectedly",
-        contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
+    assertNull(feInfo, "Expected null ez info");
+    assertEquals(contents, DFSTestUtil.readFile(fs, snapshottedZoneFile),
+        "Contents of snapshotted file have changed unexpectedly");
   }
 
   /**
@@ -1641,8 +1639,8 @@
     DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
     String content = DFSTestUtil.readFile(fs, target);
     fs.createSymlink(target, link, false);
-    assertEquals("Contents read from link are not the same as target",
-        content, DFSTestUtil.readFile(fs, link));
+    assertEquals(content, DFSTestUtil.readFile(fs, link),
+        "Contents read from link are not the same as target");
     fs.delete(parent, true);
 
     // Now let's test when the symlink and target are in different
@@ -1654,8 +1652,8 @@
     DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
     content = DFSTestUtil.readFile(fs, target);
     fs.createSymlink(target, link, false);
-    assertEquals("Contents read from link are not the same as target",
-        content, DFSTestUtil.readFile(fs, link));
+    assertEquals(content, DFSTestUtil.readFile(fs, link),
+        "Contents read from link are not the same as target");
     fs.delete(link, true);
     fs.delete(target, true);
   }
@@ -1725,13 +1723,13 @@
     dfsAdmin.createEncryptionZone(rootDir, TEST_KEY, NO_TRASH);
     DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
 
-    assertEquals("File can be created on the root encryption zone " +
-            "with correct length",
-        len, fs.getFileStatus(zoneFile).getLen());
-    assertEquals("Root dir is encrypted",
-        true, fs.getFileStatus(rootDir).isEncrypted());
-    assertEquals("File is encrypted",
-        true, fs.getFileStatus(zoneFile).isEncrypted());
+    assertEquals(len, fs.getFileStatus(zoneFile).getLen(),
+        "File can be created on the root encryption zone " +
+        "with correct length");
+    assertEquals(true, fs.getFileStatus(rootDir).isEncrypted(),
+        "Root dir is encrypted");
+    assertEquals(true, fs.getFileStatus(zoneFile).isEncrypted(),
+        "File is encrypted");
     DFSTestUtil.verifyFilesNotEqual(fs, zoneFile, rawFile, len);
   }
 
@@ -1749,8 +1747,8 @@
     assertNumZones(1);
     assertZonePresent(TEST_KEY, "/somewhere/base/zone");
 
-    assertEquals("Got unexpected ez path", "/somewhere/base/zone", dfsAdmin
-        .getEncryptionZoneForPath(zoneDir).getPath().toString());
+      assertEquals("/somewhere/base/zone", dfsAdmin
+              .getEncryptionZoneForPath(zoneDir).getPath().toString(), "Got unexpected ez path");
   }
 
   @Test
@@ -1760,12 +1758,12 @@
     dfsAdmin.createEncryptionZone(ezPath, TEST_KEY, NO_TRASH);
     Path zoneFile = new Path(ezPath, "file");
     EncryptionZone ez = fs.getEZForPath(zoneFile);
-    assertNotNull("Expected EZ for non-existent path in EZ", ez);
+    assertNotNull(ez, "Expected EZ for non-existent path in EZ");
     ez = dfsAdmin.getEncryptionZoneForPath(zoneFile);
-    assertNotNull("Expected EZ for non-existent path in EZ", ez);
+    assertNotNull(ez, "Expected EZ for non-existent path in EZ");
     ez = dfsAdmin.getEncryptionZoneForPath(
         new Path("/does/not/exist"));
-    assertNull("Expected null for non-existent path not in EZ", ez);
+    assertNull(ez, "Expected null for non-existent path not in EZ");
   }
 
   @Test
@@ -1851,8 +1849,8 @@
     // if root path is an encryption zone
     Path encFileCurrentTrash = shell.getCurrentTrashDir(encFile);
     Path rootDirCurrentTrash = shell.getCurrentTrashDir(rootDir);
-    assertEquals("Root trash should be equal with ezFile trash",
-        encFileCurrentTrash, rootDirCurrentTrash);
+    assertEquals(encFileCurrentTrash, rootDirCurrentTrash,
+        "Root trash should be equal with ezFile trash");
 
     // Use webHDFS client to test trash root path
     final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
@@ -1882,7 +1880,7 @@
     fs.mkdirs(ezRoot3);
     dfsAdmin.createEncryptionZone(ezRoot3, TEST_KEY, NO_TRASH);
     Collection<FileStatus> trashRootsBegin = fs.getTrashRoots(true);
-    assertEquals("Unexpected getTrashRoots result", 0, trashRootsBegin.size());
+    assertEquals(0, trashRootsBegin.size(), "Unexpected getTrashRoots result");
 
     final Path encFile = new Path(ezRoot2, "encFile");
     final int len = 8192;
@@ -1893,16 +1891,16 @@
     verifyShellDeleteWithTrash(shell, encFile);
 
     Collection<FileStatus> trashRootsDelete1 = fs.getTrashRoots(true);
-    assertEquals("Unexpected getTrashRoots result", 1,
-        trashRootsDelete1.size());
+    assertEquals(1, trashRootsDelete1.size(),
+        "Unexpected getTrashRoots result");
 
     final Path nonEncFile = new Path("/nonEncFile");
     DFSTestUtil.createFile(fs, nonEncFile, len, (short) 1, 0xFEED);
     verifyShellDeleteWithTrash(shell, nonEncFile);
 
     Collection<FileStatus> trashRootsDelete2 = fs.getTrashRoots(true);
-    assertEquals("Unexpected getTrashRoots result", 2,
-        trashRootsDelete2.size());
+    assertEquals(2, trashRootsDelete2.size(),
+        "Unexpected getTrashRoots result");
   }
 
   private void verifyShellDeleteWithTrash(FsShell shell, Path path)
@@ -1914,14 +1912,14 @@
       while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
         checkTrash = checkTrash.getParent();
       }
-      assertEquals("No .Trash component found in trash dir " + trashDir,
-          ".Trash", checkTrash.getName());
+      assertEquals(".Trash", checkTrash.getName(),
+          "No .Trash component found in trash dir " + trashDir);
       final Path trashFile =
           new Path(shell.getCurrentTrashDir(path) + "/" + path);
       String[] argv = new String[]{"-rm", "-r", path.toString()};
       int res = ToolRunner.run(shell, argv);
-      assertEquals("rm failed", 0, res);
-      assertTrue("File not in trash : " + trashFile, fs.exists(trashFile));
+      assertEquals(0, res, "rm failed");
+      assertTrue(fs.exists(trashFile), "File not in trash : " + trashFile);
     } catch (IOException ioe) {
       fail(ioe.getMessage());
     } finally {
@@ -1948,9 +1946,9 @@
     credentials.addSecretKey(lookUpKey,
         DFSUtilClient.string2Bytes(dummyKeyProvider));
     client.ugi.addCredentials(credentials);
-    Assert.assertEquals("Client Key provider is different from provider in "
-        + "credentials map", dummyKeyProvider,
-        client.getKeyProviderUri().toString());
+    Assertions.assertEquals(dummyKeyProvider,
+        client.getKeyProviderUri().toString(),
+        "Client Key provider is different from provider in credentials map");
   }
 
 
@@ -1974,9 +1972,9 @@
         getTestServerDefaults(null);
     Mockito.doReturn(serverDefaultsWithKeyProviderNull)
         .when(mockClient).getServerDefaults();
-    Assert.assertEquals(
-        "Key provider uri from client doesn't match with uri from conf",
-        dummyKeyProviderUri1, mockClient.getKeyProviderUri().toString());
+    Assertions.assertEquals(
+        dummyKeyProviderUri1, mockClient.getKeyProviderUri().toString(),
+        "Key provider uri from client doesn't match with uri from conf");
     Mockito.verify(mockClient, Mockito.times(1)).getServerDefaults();
 
     String dummyKeyProviderUri2 = "dummy://foo:bar@test_provider2";
@@ -1985,9 +1983,9 @@
     // Namenode returning dummyKeyProvider2 in serverDefaults.
     Mockito.doReturn(serverDefaultsWithDummyKeyProvider)
     .when(mockClient).getServerDefaults();
-    Assert.assertEquals(
-        "Key provider uri from client doesn't match with uri from namenode",
-        dummyKeyProviderUri2, mockClient.getKeyProviderUri().toString());
+    Assertions.assertEquals(
+        dummyKeyProviderUri2, mockClient.getKeyProviderUri().toString(),
+        "Key provider uri from client doesn't match with uri from namenode");
     Mockito.verify(mockClient, Mockito.times(2)).getServerDefaults();
   }
 
@@ -2001,38 +1999,38 @@
   public void testDifferentKMSProviderOnUpgradedNamenode() throws Exception {
     Configuration clusterConf = cluster.getConfiguration(0);
     URI namenodeKeyProviderUri = URI.create(getKeyProviderURI());
-    Assert.assertEquals("Key Provider for client and namenode are different",
-        namenodeKeyProviderUri, cluster.getFileSystem().getClient()
-        .getKeyProviderUri());
+    Assertions.assertEquals(namenodeKeyProviderUri,
+        cluster.getFileSystem().getClient().getKeyProviderUri(),
+        "Key Provider for client and namenode are different");
 
     // Unset the provider path in conf
     clusterConf.unset(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
-    // Even after unsetting the local conf, the client key provider should be
-    // the same as namenode's provider.
-    Assert.assertEquals("Key Provider for client and namenode are different",
-        namenodeKeyProviderUri, cluster.getFileSystem().getClient()
-        .getKeyProviderUri());
+    // Even after unsetting the local conf, the client key provider should be
+    // the same as namenode's provider.
+    Assertions.assertEquals(namenodeKeyProviderUri,
+        cluster.getFileSystem().getClient().getKeyProviderUri(),
+        "Key Provider for client and namenode are different");
 
     // Set the provider path to some dummy scheme.
     clusterConf.set(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://foo:bar@test_provider1");
-    // Even after pointing the conf to some dummy provider, the client key
-    // provider should be the same as namenode's provider.
-    Assert.assertEquals("Key Provider for client and namenode are different",
-        namenodeKeyProviderUri, cluster.getFileSystem().getClient()
-        .getKeyProviderUri());
+    // Even after pointing the conf to some dummy provider, the client key
+    // provider should be the same as namenode's provider.
+    Assertions.assertEquals(namenodeKeyProviderUri,
+        cluster.getFileSystem().getClient().getKeyProviderUri(),
+        "Key Provider for client and namenode are different");
 
     // Ignore the key provider from NN.
     clusterConf.setBoolean(
         DFS_CLIENT_IGNORE_NAMENODE_DEFAULT_KMS_URI, true);
-    Assert.assertEquals("Expecting Key Provider for client config",
-        "dummy://foo:bar@test_provider1", cluster.getFileSystem().getClient()
-            .getKeyProviderUri().toString());
-    Assert.assertNotEquals("Key Provider for client and namenode is different",
-        namenodeKeyProviderUri, cluster.getFileSystem().getClient()
-            .getKeyProviderUri().toString());
+    Assertions.assertEquals("dummy://foo:bar@test_provider1",
+        cluster.getFileSystem().getClient().getKeyProviderUri().toString(),
+        "Expecting Key Provider for client config");
+    Assertions.assertNotEquals(namenodeKeyProviderUri,
+        cluster.getFileSystem().getClient().getKeyProviderUri().toString(),
+        "Key Provider for client and namenode is different");
   }
 
   /**
@@ -2047,11 +2045,11 @@
     URI namenodeKeyProviderUri = URI.create(getKeyProviderURI());
     URI clientKeyProviderUri =
         cluster.getFileSystem().getClient().getKeyProviderUri();
-    Assert.assertNotNull(clientKeyProviderUri);
-    // Since the client and the namenode share the same conf, they will have
-    // identical key provider.
-    Assert.assertEquals("Key Provider for client and namenode are different",
-        namenodeKeyProviderUri, clientKeyProviderUri);
+    Assertions.assertNotNull(clientKeyProviderUri);
+    // Since the client and the namenode share the same conf, they will have
+    // identical key provider.
+    Assertions.assertEquals(namenodeKeyProviderUri, clientKeyProviderUri,
+        "Key Provider for client and namenode are different");
 
     String dummyKeyProviderUri = "dummy://foo:bar@test_provider";
     // Unset the provider path in conf.
@@ -2067,8 +2065,8 @@
     // Since FsServerDefaults#keyProviderUri is null, the client
     // will fallback to local conf which is null.
     clientKeyProviderUri = spyClient.getKeyProviderUri();
-    Assert.assertEquals("Client keyProvider should be " + dummyKeyProviderUri,
-        dummyKeyProviderUri, clientKeyProviderUri.toString());
+    Assertions.assertEquals(dummyKeyProviderUri, clientKeyProviderUri.toString(),
+        "Client keyProvider should be " + dummyKeyProviderUri);
     Mockito.verify(spyClient, Mockito.times(1)).getServerDefaults();
   }
 
@@ -2157,17 +2155,17 @@
     boolean match = false;
     while (it.hasNext()) {
       EncryptionZone ez = it.next();
-      assertNotEquals("EncryptionZone " + zoneSubChild.toString() +
-          " should not be listed.",
-          ez.getPath(), zoneSubChild.toString());
+      assertNotEquals(ez.getPath(), zoneSubChild.toString(),
+          "EncryptionZone " + zoneSubChild.toString() +
+          " should not be listed.");
     }
     //will "trash" the zone direct child of snapshottable directory
     verifyShellDeleteWithTrash(shell, zoneDirectChild);
     //permanently remove zone direct child of snapshottable directory
     fsWrapper.delete(shell.getCurrentTrashDir(zoneDirectChild), true);
-    assertFalse("listEncryptionZones should not return anything, " +
-            "since both EZs were deleted.",
-        dfsAdmin.listEncryptionZones().hasNext());
+    assertFalse(dfsAdmin.listEncryptionZones().hasNext(),
+        "listEncryptionZones should not return anything, " +
+        "since both EZs were deleted.");
   }
 
   /**
@@ -2200,9 +2198,9 @@
     final Token<?>[] tokens =
         webfs.addDelegationTokens("JobTracker", creds);
 
-    Assert.assertEquals(2, tokens.length);
-    Assert.assertEquals(tokens[1], testToken);
-    Assert.assertEquals(2, creds.numberOfTokens());
+    Assertions.assertEquals(2, tokens.length);
+    Assertions.assertEquals(tokens[1], testToken);
+    Assertions.assertEquals(2, creds.numberOfTokens());
   }
 
   /**
@@ -2237,8 +2235,8 @@
     // raw encrypted bytes.
     InputStream cryptoStream =
         webhdfs.open(encryptedFilePath).getWrappedStream();
-    Assert.assertTrue("cryptoStream should be an instance of "
-        + "CryptoInputStream", (cryptoStream instanceof CryptoInputStream));
+    Assertions.assertTrue((cryptoStream instanceof CryptoInputStream),
+        "cryptoStream should be an instance of CryptoInputStream");
     InputStream encryptedStream =
         ((CryptoInputStream)cryptoStream).getWrappedStream();
     // Verify that the data read from the raw input stream is different
@@ -2254,7 +2252,7 @@
       IOUtils.copyBytes(is, os, 1024, true);
       streamBytes = os.toByteArray();
     }
-    Assert.assertArrayEquals(content.getBytes(), streamBytes);
+    Assertions.assertArrayEquals(content.getBytes(), streamBytes);
   }
 
   private void verifyRaw(String content, InputStream is, InputStream rawIs)
@@ -2264,14 +2262,14 @@
       IOUtils.copyBytes(is, os, 1024, true);
       streamBytes = os.toByteArray();
     }
-    Assert.assertFalse(Arrays.equals(content.getBytes(), streamBytes));
+    Assertions.assertFalse(Arrays.equals(content.getBytes(), streamBytes));
 
     // webhdfs raw bytes should match the raw bytes from dfs.
     try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
       IOUtils.copyBytes(rawIs, os, 1024, true);
       rawBytes = os.toByteArray();
     }
-    Assert.assertArrayEquals(rawBytes, streamBytes);
+    Assertions.assertArrayEquals(rawBytes, streamBytes);
   }
 
   /* Tests that if client is old and namenode is new then the
@@ -2297,7 +2295,7 @@
     String location = namenodeConnection.getHeaderField("Location");
     URL datanodeURL = new URL(location);
     String path = datanodeURL.getPath();
-    Assert.assertEquals(
+    Assertions.assertEquals(
         WebHdfsFileSystem.PATH_PREFIX + encryptedFilePath.toString(), path);
     HttpURLConnection datanodeConnection = returnConnection(datanodeURL,
         "GET", false);
@@ -2330,11 +2328,11 @@
     // Return a connection with client not supporting EZ.
     HttpURLConnection namenodeConnection =
         returnConnection(url, "GET", false);
-    Assert.assertNotNull(namenodeConnection.getHeaderField("Location"));
+    Assertions.assertNotNull(namenodeConnection.getHeaderField("Location"));
     URL datanodeUrl = new URL(namenodeConnection.getHeaderField("Location"));
-    Assert.assertNotNull(datanodeUrl);
+    Assertions.assertNotNull(datanodeUrl);
     String path = datanodeUrl.getPath();
-    Assert.assertEquals(
+    Assertions.assertEquals(
         WebHdfsFileSystem.PATH_PREFIX + encryptedFilePath.toString(), path);
 
     url = new URL("http", addr.getHostString(), addr.getPort(),
@@ -2342,11 +2340,11 @@
         + "?op=OPEN");
     // Return a connection with client supporting EZ.
     namenodeConnection = returnConnection(url, "GET", true);
-    Assert.assertNotNull(namenodeConnection.getHeaderField("Location"));
+    Assertions.assertNotNull(namenodeConnection.getHeaderField("Location"));
     datanodeUrl = new URL(namenodeConnection.getHeaderField("Location"));
-    Assert.assertNotNull(datanodeUrl);
+    Assertions.assertNotNull(datanodeUrl);
     path = datanodeUrl.getPath();
-    Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX
+    Assertions.assertEquals(WebHdfsFileSystem.PATH_PREFIX
         + "/.reserved/raw" + encryptedFilePath.toString(), path);
   }
 
@@ -2382,7 +2380,7 @@
     FSDataInputStream in = webfs.open(encryptedFilePath);
     for (int i = 0; i < 1024; i++) {
       in.seek(i);
-      Assert.assertEquals((data[i] & 0XFF), in.read());
+      Assertions.assertEquals((data[i] & 0XFF), in.read());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
index bb8b79b..651e320 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
@@ -28,10 +28,10 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.File;
 import java.io.IOException;
@@ -55,7 +55,7 @@
   protected static final EnumSet< CreateEncryptionZoneFlag > NO_TRASH =
       EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
@@ -87,7 +87,7 @@
     fs.getClient().setKeyProvider(nn0Provider);
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -115,12 +115,12 @@
     cluster.shutdownNameNode(0);
     cluster.transitionToActive(1);
 
-    Assert.assertEquals("Got unexpected ez path", dir.toString(),
-        dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
-    Assert.assertEquals("Got unexpected ez path", dir.toString(),
-        dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
-    Assert.assertEquals("File contents after failover were changed",
-        contents, DFSTestUtil.readFile(fs, dirFile));
+    Assertions.assertEquals(dir.toString(), dfsAdmin1.getEncryptionZoneForPath(
+        dir).getPath().toString(), "Got unexpected ez path");
+    Assertions.assertEquals(dir.toString(), dfsAdmin1.getEncryptionZoneForPath(
+        dirChild).getPath().toString(), "Got unexpected ez path");
+    Assertions.assertEquals(contents, DFSTestUtil.readFile(fs, dirFile),
+        "File contents after failover were changed");
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
index d29db2b..05c1f2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
-
 import java.util.function.Supplier;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
 import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
@@ -34,10 +35,10 @@
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.File;
 import java.util.Arrays;
@@ -53,18 +54,18 @@
         miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     File kmsDir = new File("target/test-classes/" +
         UUID.randomUUID().toString());
-    Assert.assertTrue(kmsDir.mkdirs());
+    Assertions.assertTrue(kmsDir.mkdirs());
     MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
     miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
     miniKMS.start();
     super.setup();
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     super.teardown();
     miniKMS.stop();
@@ -100,13 +101,13 @@
     Credentials creds = new Credentials();
     Token<?> tokens[] = fs.addDelegationTokens(renewer, creds);
     LOG.debug("Delegation tokens: " + Arrays.asList(tokens));
-    Assert.assertEquals(2, tokens.length);
-    Assert.assertEquals(2, creds.numberOfTokens());
+    Assertions.assertEquals(2, tokens.length);
+    Assertions.assertEquals(2, creds.numberOfTokens());
     
     // If the dt exists, will not get again
     tokens = fs.addDelegationTokens(renewer, creds);
-    Assert.assertEquals(0, tokens.length);
-    Assert.assertEquals(2, creds.numberOfTokens());
+    Assertions.assertEquals(0, tokens.length);
+    Assertions.assertEquals(2, creds.numberOfTokens());
   }
 
   @Test(timeout = 120000)
@@ -122,8 +123,8 @@
 
     @SuppressWarnings("unchecked")
     KMSClientProvider spy = getKMSClientProvider();
-    assertTrue("key queue is empty after creating encryption zone",
-        spy.getEncKeyQueueSize(TEST_KEY) > 0);
+    assertTrue(spy.getEncKeyQueueSize(TEST_KEY) > 0,
+        "key queue is empty after creating encryption zone");
 
     conf.setInt(
         DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY, 0);
@@ -151,9 +152,9 @@
     Credentials creds = new Credentials();
     final Token<?>[] tokens = webfs.addDelegationTokens("JobTracker", creds);
 
-    Assert.assertEquals(2, tokens.length);
-    Assert.assertEquals(KMSDelegationToken.TOKEN_KIND_STR,
+    Assertions.assertEquals(2, tokens.length);
+    Assertions.assertEquals(KMSDelegationToken.TOKEN_KIND_STR,
         tokens[1].getKind().toString());
-    Assert.assertEquals(2, creds.numberOfTokens());
+    Assertions.assertEquals(2, creds.numberOfTokens());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
index da3407d..860d31f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
@@ -23,10 +23,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -43,7 +43,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     conf = new HdfsConfiguration();
     int numDN = ErasureCodeBenchmarkThroughput.getEcPolicy().getNumDataUnits() +
@@ -55,7 +55,7 @@
         ErasureCodeBenchmarkThroughput.getEcPolicy().getName());
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown(true);
@@ -63,9 +63,9 @@
   }
 
   private static void runBenchmark(String[] args) throws Exception {
-    Assert.assertNotNull(conf);
-    Assert.assertNotNull(fs);
-    Assert.assertEquals(0, ToolRunner.run(conf,
+    Assertions.assertNotNull(conf);
+    Assertions.assertNotNull(fs);
+    Assertions.assertEquals(0, ToolRunner.run(conf,
         new ErasureCodeBenchmarkThroughput(fs), args));
   }
 
@@ -80,7 +80,7 @@
             ErasureCodeBenchmarkThroughput.getFilePath(dataSize, isEc));
       }
     });
-    Assert.assertEquals(numFile, statuses.length);
+    Assertions.assertEquals(numFile, statuses.length);
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java
index 24c88bd..181ae3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingAddConfig.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 
@@ -28,7 +25,7 @@
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test that ensures addition of user defined EC policies is allowed only when
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
index e4b09a8..9b852b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
@@ -33,9 +33,9 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,7 +58,7 @@
 import static org.apache.hadoop.fs.permission.FsAction.NONE;
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test after enable Erasure Coding on cluster, exercise Java API make sure they
@@ -82,7 +82,7 @@
       LoggerFactory.getLogger(TestErasureCodingExerciseAPIs.class);
 
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws IOException {
     ecPolicy = getEcPolicy();
     conf = new HdfsConfiguration();
@@ -252,8 +252,8 @@
     AclStatus as = fs.getAclStatus(p);
 
     for (AclEntry entry : aclSpec) {
-      assertTrue(String.format("as: %s, entry: %s", as, entry),
-          as.getEntries().contains(entry));
+      assertTrue(as.getEntries().contains(entry),
+          String.format("as: %s, entry: %s", as, entry));
     }
     List<AclEntry> maclSpec = Lists.newArrayList(
         aclEntry(ACCESS, USER, "bar", READ_EXECUTE),
@@ -262,8 +262,8 @@
 
     as = fs.getAclStatus(p);
     for (AclEntry entry : maclSpec) {
-      assertTrue(String.format("as: %s, entry: %s", as, entry),
-          as.getEntries().contains(entry));
+      assertTrue(as.getEntries().contains(entry),
+          String.format("as: %s, entry: %s", as, entry));
     }
 
     fs.removeAclEntries(p, maclSpec);
@@ -539,7 +539,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java
index e47cbf0..097ae4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingMultipleRacks.java
@@ -27,10 +27,10 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,8 +40,8 @@
 import java.util.Map;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test erasure coding block placement with skewed # nodes per rack.
@@ -70,7 +70,7 @@
   private Configuration conf;
   private DistributedFileSystem dfs;
 
-  @Before
+  @BeforeEach
   public void setup() {
     ecPolicy = getPolicy();
     conf = new HdfsConfiguration();
@@ -98,7 +98,7 @@
     dfs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 835d18f..57c1f62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -41,11 +41,11 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.FileNotFoundException;
@@ -58,7 +58,7 @@
 import java.util.Map;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestErasureCodingPolicies {
   private Configuration conf;
@@ -75,7 +75,7 @@
   @Rule
   public Timeout timeout = new Timeout(60 * 1000);
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws IOException {
     ecPolicy = getEcPolicy();
     conf = new HdfsConfiguration();
@@ -89,7 +89,7 @@
     DFSTestUtil.enableAllECPolicies(fs);
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -227,13 +227,13 @@
 
     // Already set directory-level policies should still be in effect
     Path disabledPolicy = new Path(dir1, "afterDisabled");
-    Assert.assertEquals("Dir does not have policy set",
-        ecPolicy,
-        fs.getErasureCodingPolicy(dir1));
+    Assertions.assertEquals(ecPolicy,
+        fs.getErasureCodingPolicy(dir1),
+        "Dir does not have policy set");
     fs.create(disabledPolicy).close();
-    Assert.assertEquals("File did not inherit dir's policy",
-        ecPolicy,
-        fs.getErasureCodingPolicy(disabledPolicy));
+    Assertions.assertEquals(ecPolicy,
+        fs.getErasureCodingPolicy(disabledPolicy),
+        "File did not inherit dir's policy");
 
     // Also check loading disabled EC policies from fsimage
     fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
@@ -241,12 +241,12 @@
     fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
     cluster.restartNameNodes();
 
-    Assert.assertEquals("Dir does not have policy set",
-        ecPolicy,
-        fs.getErasureCodingPolicy(dir1));
-    Assert.assertEquals("File does not have policy set",
-        ecPolicy,
-        fs.getErasureCodingPolicy(disabledPolicy));
+    Assertions.assertEquals(ecPolicy,
+        fs.getErasureCodingPolicy(dir1),
+        "Dir does not have policy set");
+    Assertions.assertEquals(ecPolicy,
+        fs.getErasureCodingPolicy(disabledPolicy),
+        "File does not have policy set");
   }
 
   @Test
@@ -325,19 +325,19 @@
     final Path reserveDir = new Path("/.reserved");
     // verify the EC policy is null, not an exception
     ErasureCodingPolicy policy = fs.getErasureCodingPolicy(reserveDir);
-    assertNull("Got unexpected erasure coding policy", policy);
+    assertNull(policy, "Got unexpected erasure coding policy");
 
     // root EC policy before being set is null, verify the reserved raw dir
     // is treated as root
     final Path root = new Path("/");
     final Path rawRoot = new Path("/.reserved/raw");
     final Path rawRootSlash = new Path("/.reserved/raw/");
-    assertNull("Got unexpected erasure coding policy",
-        fs.getErasureCodingPolicy(root));
-    assertNull("Got unexpected erasure coding policy",
-        fs.getErasureCodingPolicy(rawRoot));
-    assertNull("Got unexpected erasure coding policy",
-        fs.getErasureCodingPolicy(rawRootSlash));
+    assertNull(fs.getErasureCodingPolicy(root),
+        "Got unexpected erasure coding policy");
+    assertNull(fs.getErasureCodingPolicy(rawRoot),
+        "Got unexpected erasure coding policy");
+    assertNull(fs.getErasureCodingPolicy(rawRootSlash),
+        "Got unexpected erasure coding policy");
 
     // verify the EC policy correctness under the reserved raw dir
     final Path ecDir = new Path("/ec");
@@ -345,21 +345,21 @@
     fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
 
     ErasureCodingPolicy policyBase = fs.getErasureCodingPolicy(ecDir);
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        policyBase);
+    assertEquals(ecPolicy, policyBase,
+        "Got unexpected erasure coding policy");
 
     final Path rawRootEc = new Path("/.reserved/raw/ec");
     ErasureCodingPolicy policyMap = fs.getErasureCodingPolicy(rawRootEc);
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        policyMap);
+    assertEquals(ecPolicy, policyMap,
+        "Got unexpected erasure coding policy");
   }
 
   @Test
   public void testGetErasureCodingPolicy() throws Exception {
     List<ErasureCodingPolicy> sysECPolicies =
         SystemErasureCodingPolicies.getPolicies();
-    assertTrue("System ecPolicies should exist",
-        sysECPolicies.size() > 0);
+    assertTrue(sysECPolicies.size() > 0,
+        "System ecPolicies should exist");
 
     ErasureCodingPolicy usingECPolicy = sysECPolicies.get(0);
     String src = "/ec2";
@@ -380,8 +380,8 @@
     HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
     ErasureCodingPolicy actualPolicy = hdfsFileStatus.getErasureCodingPolicy();
     assertNotNull(actualPolicy);
-    assertEquals("Actually used ecPolicy should be equal with target ecPolicy",
-        usingECPolicy, actualPolicy);
+    assertEquals(usingECPolicy, actualPolicy,
+        "Actually used ecPolicy should be equal with target ecPolicy");
   }
 
   @Test
@@ -434,7 +434,7 @@
         sysPolicies.remove(ecpi.getPolicy());
       }
     }
-    assertTrue("All system policies should be enabled", sysPolicies.isEmpty());
+    assertTrue(sysPolicies.isEmpty(), "All system policies should be enabled");
 
     // Query after add a new policy
     ECSchema toAddSchema = new ECSchema("rs", 5, 2);
@@ -443,9 +443,9 @@
     ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
     fs.addErasureCodingPolicies(policyArray);
     allECPolicies = fs.getAllErasureCodingPolicies();
-    assertEquals("Should return new added policy",
-        SystemErasureCodingPolicies.getPolicies().size() + 1,
-        allECPolicies.size());
+    assertEquals(SystemErasureCodingPolicies.getPolicies().size() + 1,
+        allECPolicies.size(),
+        "Should return new added policy");
 
   }
 
@@ -515,13 +515,13 @@
     userfs.mkdirs(ecdir);
     final String ecPolicyName = ecPolicy.getName();
     useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
-    assertEquals("Policy not present on dir",
-        ecPolicyName,
-        useradmin.getErasureCodingPolicy(ecdir).getName());
+    assertEquals(ecPolicyName,
+        useradmin.getErasureCodingPolicy(ecdir).getName(),
+        "Policy not present on dir");
     userfs.create(ecfile).close();
-    assertEquals("Policy not present on file",
-        ecPolicyName,
-        useradmin.getErasureCodingPolicy(ecfile).getName());
+    assertEquals(ecPolicyName,
+        useradmin.getErasureCodingPolicy(ecfile).getName(),
+        "Policy not present on file");
 
     // Unset and re-set
     useradmin.unsetErasureCodingPolicy(ecdir);
@@ -631,7 +631,7 @@
     final String illegalPolicyName = "RS-DEFAULT-1-2-64k";
     try {
       fs.createFile(filePath1).ecPolicyName(illegalPolicyName).build().close();
-      Assert.fail("illegal erasure coding policy should not be found");
+      Assertions.fail("illegal erasure coding policy should not be found");
     } catch (Exception e) {
       GenericTestUtils.assertExceptionContains("Policy '" + illegalPolicyName
           + "' does not match any enabled erasure coding policies", e);
@@ -691,7 +691,7 @@
           .ecPolicyName(ecPolicyName)
           .replicate()
           .build().close();
-      Assert.fail("shouldReplicate and ecPolicyName are exclusive " +
+      Assertions.fail("shouldReplicate and ecPolicyName are exclusive " +
           "parameters. Set both is not allowed.");
     }catch (Exception e){
       GenericTestUtils.assertExceptionContains("SHOULD_REPLICATE flag and " +
@@ -704,7 +704,7 @@
           EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE,
               CreateFlag.SHOULD_REPLICATE), false, (short) 1, 1024, null, 1024,
           null, null, ecPolicyName);
-      Assert.fail("SHOULD_REPLICATE flag and ecPolicyName are exclusive " +
+      Assertions.fail("SHOULD_REPLICATE flag and ecPolicyName are exclusive " +
           "parameters. Set both is not allowed.");
     }catch (Exception e){
       GenericTestUtils.assertExceptionContains("SHOULD_REPLICATE flag and " +
@@ -723,8 +723,8 @@
   public void testGetAllErasureCodingCodecs() throws Exception {
     Map<String, String> allECCodecs = fs
         .getAllErasureCodingCodecs();
-    assertTrue("At least 3 system codecs should be enabled",
-        allECCodecs.size() >= 3);
+    assertTrue(allECCodecs.size() >= 3,
+        "At least 3 system codecs should be enabled");
     System.out.println("Erasure Coding Codecs: Codec [Coder List]");
     for (String codec : allECCodecs.keySet()) {
       String coders = allECCodecs.get(codec);
@@ -762,7 +762,7 @@
     for (int cellSize: cellSizes) {
       try {
         new ErasureCodingPolicy(toAddSchema, cellSize);
-        Assert.fail("Invalid cell size should be detected.");
+        Assertions.fail("Invalid cell size should be detected.");
       } catch (Exception e){
         GenericTestUtils.assertExceptionContains("cellSize must be", e);
       }
@@ -852,66 +852,66 @@
     fs.mkdirs(replicaDir);
     fs.createFile(replicaFile).build().close();
     HdfsFileStatus fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile);
-    assertEquals("File should inherit EC policy.", ecPolicy, fileStatus
-        .getErasureCodingPolicy());
-    assertEquals("File should be a EC file.", true, fileStatus
-        .isErasureCoded());
-    assertEquals("File should have the same EC policy as its ancestor.",
-        ecPolicy, fs.getErasureCodingPolicy(replicaFile));
+    assertEquals(ecPolicy, fileStatus.getErasureCodingPolicy(),
+        "File should inherit EC policy.");
+    assertTrue(fileStatus.isErasureCoded(),
+        "File should be an EC file.");
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(replicaFile),
+        "File should have the same EC policy as its ancestor.");
     fs.delete(replicaFile, false);
 
     // 2. Set replication policy on child directory, then get back the policy
     fs.setErasureCodingPolicy(replicaDir, replicaPolicy.getName());
     ErasureCodingPolicy temp = fs.getErasureCodingPolicy(replicaDir);
-    assertEquals("Directory should hide replication EC policy.",
-        null, temp);
+    assertNull(temp,
+        "Directory should hide replication EC policy.");
 
     // 3. New file will be replication file. Please be noted that replication
     //    policy only set on directory, not on file
     fs.createFile(replicaFile).build().close();
-    assertEquals("Replication file should have default replication factor.",
-        fs.getDefaultReplication(),
-        fs.getFileStatus(replicaFile).getReplication());
+    assertEquals(fs.getDefaultReplication(),
+        fs.getFileStatus(replicaFile).getReplication(),
+        "Replication file should have default replication factor.");
     fs.setReplication(replicaFile, (short) 2);
-    assertEquals("File should have replication factor as expected.",
-        2, fs.getFileStatus(replicaFile).getReplication());
+    assertEquals(2, fs.getFileStatus(replicaFile).getReplication(),
+        "File should have replication factor as expected.");
     fileStatus = (HdfsFileStatus)fs.getFileStatus(replicaFile);
-    assertEquals("File should not have EC policy.", null, fileStatus
-        .getErasureCodingPolicy());
-    assertEquals("File should not be a EC file.", false,
-        fileStatus.isErasureCoded());
+    assertNull(fileStatus.getErasureCodingPolicy(),
+        "File should not have EC policy.");
+    assertFalse(fileStatus.isErasureCoded(),
+        "File should not be an EC file.");
     ErasureCodingPolicy ecPolicyOnFile = fs.getErasureCodingPolicy(replicaFile);
-    assertEquals("File should not have EC policy.", null, ecPolicyOnFile);
+    assertNull(ecPolicyOnFile, "File should not have EC policy.");
     fs.delete(replicaFile, false);
 
     // 4. New directory under replication directory, is also replication
     // directory
     fs.mkdirs(subReplicaDir);
-    assertEquals("Directory should inherit hiding replication EC policy.",
-        null, fs.getErasureCodingPolicy(subReplicaDir));
+    assertNull(fs.getErasureCodingPolicy(subReplicaDir),
+        "Directory should inherit hiding replication EC policy.");
     fs.createFile(subReplicaFile).build().close();
-    assertEquals("File should have default replication factor.",
-        fs.getDefaultReplication(),
-        fs.getFileStatus(subReplicaFile).getReplication());
+    assertEquals(fs.getDefaultReplication(),
+        fs.getFileStatus(subReplicaFile).getReplication(),
+        "File should have default replication factor.");
     fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile);
-    assertEquals("File should not have EC policy.", null,
-        fileStatus.getErasureCodingPolicy());
-    assertEquals("File should not be a EC file.", false,
-        fileStatus.isErasureCoded());
-    assertEquals("File should not have EC policy.", null,
-        fs.getErasureCodingPolicy(subReplicaFile));
+    assertNull(fileStatus.getErasureCodingPolicy(),
+        "File should not have EC policy.");
+    assertFalse(fileStatus.isErasureCoded(),
+        "File should not be an EC file.");
+    assertNull(fs.getErasureCodingPolicy(subReplicaFile),
+        "File should not have EC policy.");
     fs.delete(subReplicaFile, false);
 
     // 5. Unset replication policy on directory, new file will be EC file
     fs.unsetErasureCodingPolicy(replicaDir);
     fs.createFile(subReplicaFile).build().close();
     fileStatus = (HdfsFileStatus)fs.getFileStatus(subReplicaFile);
-    assertEquals("File should inherit EC policy.", ecPolicy,
-        fileStatus.getErasureCodingPolicy());
-    assertEquals("File should be a EC file.", true,
-        fileStatus.isErasureCoded());
-    assertEquals("File should have the same EC policy as its ancestor",
-        ecPolicy, fs.getErasureCodingPolicy(subReplicaFile));
+    assertEquals(ecPolicy, fileStatus.getErasureCodingPolicy(),
+        "File should inherit EC policy.");
+    assertTrue(fileStatus.isErasureCoded(),
+        "File should be an EC file.");
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(subReplicaFile),
+        "File should have the same EC policy as its ancestor");
     fs.delete(subReplicaFile, false);
   }
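
The two files above (TestErasureCodingMultipleRacks and TestErasureCodingPolicies) keep their org.junit.Rule / org.junit.rules.Timeout fields while switching @Test to org.junit.jupiter.api.Test. JUnit 4 rules are not evaluated by the Jupiter engine, so those timeouts would no longer be enforced there; the usual Jupiter replacement is its own @Timeout annotation. A hedged sketch, assuming a 60-second class-wide limit (the class and method names below are hypothetical, not part of this patch):

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // Roughly equivalent to the old "@Rule public Timeout timeout = new Timeout(60 * 1000);"
    @Timeout(value = 60, unit = TimeUnit.SECONDS)
    class TimeoutMigrationSketch {
      @Test
      void runsUnderTheClassLevelTimeout() {
        // body elided; every @Test method in the class inherits the 60-second limit
      }
    }
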
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
index b7e7bba..2b53b91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 import java.io.IOException;
 
@@ -31,9 +31,9 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -53,7 +53,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(120000);
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws IOException {
     ecPolicy = getEcPolicy();
     groupSize = (short) (ecPolicy.getNumDataUnits()
@@ -65,7 +65,7 @@
     fs.enableErasureCodingPolicy(ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -91,8 +91,8 @@
     String contents = DFSTestUtil.readFile(fs, ecFile);
     final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
     final Path snap1ECDir = new Path(snap1, ecDir.getName());
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap1ECDir));
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1ECDir),
+        "Got unexpected erasure coding policy");
 
     // Now delete the dir which has erasure coding policy. Re-create the dir again, and
     // take another snapshot
@@ -100,8 +100,8 @@
     fs.mkdir(ecDir, FsPermission.getDirDefault());
     final Path snap2 = fs.createSnapshot(ecDirParent, "snap2");
     final Path snap2ECDir = new Path(snap2, ecDir.getName());
-    assertNull("Expected null erasure coding policy",
-        fs.getErasureCodingPolicy(snap2ECDir));
+    assertNull(fs.getErasureCodingPolicy(snap2ECDir),
+        "Expected null erasure coding policy");
 
     // Make dir again with system default ec policy
     fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
@@ -109,31 +109,31 @@
     final Path snap3ECDir = new Path(snap3, ecDir.getName());
     // Check that snap3's ECPolicy has the correct settings
     ErasureCodingPolicy ezSnap3 = fs.getErasureCodingPolicy(snap3ECDir);
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        ezSnap3);
+    assertEquals(ecPolicy, ezSnap3,
+        "Got unexpected erasure coding policy");
 
-    // Check that older snapshots still have the old ECPolicy settings
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap1ECDir));
-    assertNull("Expected null erasure coding policy",
-        fs.getErasureCodingPolicy(snap2ECDir));
+    // Check that older snapshots still have the old ECPolicy settings
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1ECDir),
+        "Got unexpected erasure coding policy");
+    assertNull(fs.getErasureCodingPolicy(snap2ECDir),
+        "Expected null erasure coding policy");
 
     // Verify contents of the snapshotted file
     final Path snapshottedECFile = new Path(snap1.toString() + "/"
         + ecDir.getName() + "/" + ecFile.getName());
-    assertEquals("Contents of snapshotted file have changed unexpectedly",
-        contents, DFSTestUtil.readFile(fs, snapshottedECFile));
+    assertEquals(contents, DFSTestUtil.readFile(fs, snapshottedECFile),
+        "Contents of snapshotted file have changed unexpectedly");
 
     // Now delete the snapshots out of order and verify the EC policy
     // correctness
     fs.deleteSnapshot(ecDirParent, snap2.getName());
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap1ECDir));
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap3ECDir));
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1ECDir),
+        "Got unexpected erasure coding policy");
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap3ECDir),
+        "Got unexpected erasure coding policy");
     fs.deleteSnapshot(ecDirParent, snap1.getName());
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap3ECDir));
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap3ECDir),
+        "Got unexpected erasure coding policy");
   }
 
   /**
@@ -147,8 +147,8 @@
 
     fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap1));
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1),
+        "Got unexpected erasure coding policy");
   }
 
   /**
@@ -164,8 +164,8 @@
     fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        ecSnap);
+    assertEquals(ecPolicy, ecSnap,
+        "Got unexpected erasure coding policy");
 
     // save namespace, restart namenode, and check ec policy correctness.
     fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -174,10 +174,9 @@
     cluster.restartNameNode(true);
 
     ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        ecSnap1);
-    assertEquals("Got unexpected ecSchema", ecSnap.getSchema(),
-        ecSnap1.getSchema());
+    assertEquals(ecPolicy, ecSnap1, "Got unexpected erasure coding policy");
+    assertEquals(ecSnap.getSchema(), ecSnap1.getSchema(),
+        "Got unexpected ecSchema");
   }
 
   /**
@@ -202,12 +201,12 @@
     String[] argv = new String[] { "-cp", "-px", snap1.toUri().toString(),
         snap1Copy.toUri().toString() };
     int ret = ToolRunner.run(new FsShell(conf), argv);
-    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+    assertEquals(SUCCESS, ret, "cp -px is not working on a snapshot");
 
-    assertNull("Got unexpected erasure coding policy",
-        fs.getErasureCodingPolicy(snap1CopyECDir));
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        fs.getErasureCodingPolicy(snap1));
+    assertNull(fs.getErasureCodingPolicy(snap1CopyECDir),
+        "Got unexpected erasure coding policy");
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1),
+        "Got unexpected erasure coding policy");
   }
 
   @Test (timeout = 300000)
@@ -247,13 +246,13 @@
 
     // verify the EC policy correctness
     ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap);
-    assertEquals("Got unexpected erasure coding policy", ecPolicy,
-        ecSnap);
+    assertEquals(ecPolicy, ecSnap,
+        "Got unexpected erasure coding policy");
 
     // verify the EC policy is null, not an exception
     final Path ecDotSnapshotDir = new Path(ecDir, ".snapshot");
     ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(ecDotSnapshotDir);
-    assertNull("Got unexpected erasure coding policy", ecSnap1);
+    assertNull(ecSnap1, "Got unexpected erasure coding policy");
   }
 
   /**
@@ -268,22 +267,22 @@
     fs.allowSnapshot(ecDir);
 
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
-    assertNull("Expected null erasure coding policy",
-        fs.getErasureCodingPolicy(snap1));
+    assertNull(fs.getErasureCodingPolicy(snap1),
+        "Expected null erasure coding policy");
 
     // Set erasure coding policy
     final ErasureCodingPolicy ec63Policy = SystemErasureCodingPolicies
         .getByID(SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
     fs.setErasureCodingPolicy(ecDir, ec63Policy.getName());
     final Path snap2 = fs.createSnapshot(ecDir, "snap2");
-    assertEquals("Got unexpected erasure coding policy", ec63Policy,
-        fs.getErasureCodingPolicy(snap2));
+    assertEquals(ec63Policy, fs.getErasureCodingPolicy(snap2),
+        "Got unexpected erasure coding policy");
 
     // Verify the EC policy correctness after the unset operation
     fs.unsetErasureCodingPolicy(ecDir);
     final Path snap3 = fs.createSnapshot(ecDir, "snap3");
-    assertNull("Expected null erasure coding policy",
-        fs.getErasureCodingPolicy(snap3));
+    assertNull(fs.getErasureCodingPolicy(snap3),
+        "Expected null erasure coding policy");
 
     // Change the erasure coding policy and take another snapshot
     final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
@@ -291,15 +290,15 @@
     fs.enableErasureCodingPolicy(ec32Policy.getName());
     fs.setErasureCodingPolicy(ecDir, ec32Policy.getName());
     final Path snap4 = fs.createSnapshot(ecDir, "snap4");
-    assertEquals("Got unexpected erasure coding policy", ec32Policy,
-        fs.getErasureCodingPolicy(snap4));
+    assertEquals(ec32Policy, fs.getErasureCodingPolicy(snap4),
+        "Got unexpected erasure coding policy");
 
-    // Check that older snapshot still have the old ECPolicy settings
-    assertNull("Expected null erasure coding policy",
-        fs.getErasureCodingPolicy(snap1));
-    assertEquals("Got unexpected erasure coding policy", ec63Policy,
-        fs.getErasureCodingPolicy(snap2));
-    assertNull("Expected null erasure coding policy",
-        fs.getErasureCodingPolicy(snap3));
+    // Check that older snapshots still have the old ECPolicy settings
+    assertNull(fs.getErasureCodingPolicy(snap1),
+        "Expected null erasure coding policy");
+    assertEquals(ec63Policy, fs.getErasureCodingPolicy(snap2),
+        "Got unexpected erasure coding policy");
+    assertNull(fs.getErasureCodingPolicy(snap3),
+        "Expected null erasure coding policy");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java
index b4baadf..0d19c10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExtendedAcls.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -49,9 +49,7 @@
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
 import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * A class for testing the behavior of HDFS directory and file ACL.
@@ -65,7 +63,7 @@
 
   private static DistributedFileSystem hdfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -76,7 +74,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
index f153b2c..b3ad931 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
@@ -28,8 +28,8 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.nio.BufferOverflowException;
@@ -63,7 +63,7 @@
       IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
       byte expected[] = DFSTestUtil.
           calculateFileContentsFromSeed(SEED, TEST_LENGTH);
-      Assert.assertArrayEquals(expected, buf);
+      Assertions.assertArrayEquals(expected, buf);
       stream.close();
     } finally {
       dfs.close();
@@ -293,36 +293,36 @@
       byte expected[] = DFSTestUtil.
           calculateFileContentsFromSeed(SEED, TEST_LENGTH);
       ReadStatistics stats = stream.getReadStatistics();
-      Assert.assertEquals(1024, stats.getTotalShortCircuitBytesRead());
-      Assert.assertEquals(2047, stats.getTotalLocalBytesRead());
-      Assert.assertEquals(2047, stats.getTotalBytesRead());
-      Assert.assertArrayEquals(expected, buf);
+      Assertions.assertEquals(1024, stats.getTotalShortCircuitBytesRead());
+      Assertions.assertEquals(2047, stats.getTotalLocalBytesRead());
+      Assertions.assertEquals(2047, stats.getTotalBytesRead());
+      Assertions.assertArrayEquals(expected, buf);
       stream.close();
       ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, new Path("/a"));
-      Assert.assertNotNull(block);
+      Assertions.assertNotNull(block);
       LinkedList<SyntheticReplicaAccessor> accessorList = accessors.get(uuid);
-      Assert.assertNotNull(accessorList);
-      Assert.assertEquals(3, accessorList.size());
+      Assertions.assertNotNull(accessorList);
+      Assertions.assertEquals(3, accessorList.size());
       SyntheticReplicaAccessor accessor = accessorList.get(0);
-      Assert.assertTrue(accessor.builder.allowShortCircuit);
-      Assert.assertEquals(block.getBlockPoolId(),
+      Assertions.assertTrue(accessor.builder.allowShortCircuit);
+      Assertions.assertEquals(block.getBlockPoolId(),
           accessor.builder.blockPoolId);
-      Assert.assertEquals(block.getBlockId(),
+      Assertions.assertEquals(block.getBlockId(),
           accessor.builder.blockId);
-      Assert.assertEquals(dfs.getClient().clientName,
+      Assertions.assertEquals(dfs.getClient().clientName,
           accessor.builder.clientName);
-      Assert.assertEquals("/a", accessor.builder.fileName);
-      Assert.assertEquals(block.getGenerationStamp(),
+      Assertions.assertEquals("/a", accessor.builder.fileName);
+      Assertions.assertEquals(block.getGenerationStamp(),
           accessor.getGenerationStamp());
-      Assert.assertTrue(accessor.builder.verifyChecksum);
-      Assert.assertEquals(1024L, accessor.builder.visibleLength);
-      Assert.assertEquals(24L, accessor.totalRead);
-      Assert.assertEquals("", accessor.getError());
-      Assert.assertEquals(1, accessor.numCloses);
+      Assertions.assertTrue(accessor.builder.verifyChecksum);
+      Assertions.assertEquals(1024L, accessor.builder.visibleLength);
+      Assertions.assertEquals(24L, accessor.totalRead);
+      Assertions.assertEquals("", accessor.getError());
+      Assertions.assertEquals(1, accessor.numCloses);
       byte[] tempBuf = new byte[5];
-      Assert.assertEquals(-1, accessor.read(TEST_LENGTH,
+      Assertions.assertEquals(-1, accessor.read(TEST_LENGTH,
             tempBuf, 0, 0));
-      Assert.assertEquals(-1, accessor.read(TEST_LENGTH,
+      Assertions.assertEquals(-1, accessor.read(TEST_LENGTH,
             tempBuf, 0, tempBuf.length));
       accessors.remove(uuid);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
index e7f3b9f..e471e1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.EOFException;
 import java.io.File;
@@ -40,7 +37,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests if FSInputChecker works correctly.
@@ -71,9 +68,9 @@
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, 
       String message) throws Exception {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
index 9dcd449..d63bb99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.Random;
@@ -29,7 +29,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests if FSOutputSummer works correctly.
@@ -90,9 +90,9 @@
   private void checkAndEraseData(byte[] actual, int from, byte[] expected,
       String message) throws Exception {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
index 7e1e593..c68d122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
@@ -20,7 +20,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -41,10 +41,10 @@
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestFetchImage {
   
@@ -58,17 +58,17 @@
   private NameNode nn1 = null;
   private Configuration conf = null;
 
-  @BeforeClass
+  @BeforeAll
   public static void setupImageDir() {
     FETCHED_IMAGE_FILE.mkdirs();
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() {
     FileUtil.fullyDelete(FETCHED_IMAGE_FILE);
   }
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws IOException, URISyntaxException {
     conf = new Configuration();
     conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index b65301f8..485f5f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -31,6 +30,8 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CreateFlag;
@@ -58,8 +59,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * This class tests the building blocks that are needed to
@@ -137,8 +138,8 @@
 
       // Get a handle to the datanode
       DataNode[] dn = cluster.listDataNodes();
-      assertTrue("There should be only one datanode but found " + dn.length,
-                  dn.length == 1);
+      assertTrue(dn.length == 1,
+          "There should be only one datanode but found " + dn.length);
 
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
@@ -161,8 +162,8 @@
       for (int i = 0; i < blocks.size(); i++) {
         ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("breakHardlinksIfNeeded detaching block " + b);
-        assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true",
-            FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
+        assertTrue(FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b),
+            "breakHardlinksIfNeeded(" + b + ") should have returned true");
       }
 
       // Since the blocks were already detached earlier, these calls should
@@ -171,8 +172,8 @@
         ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("breakHardlinksIfNeeded re-attempting to " +
                 "detach block " + b);
-        assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false",
-            FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
+        assertTrue(FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b),
+            "breakHardlinksIfNeeded(" + b + ") should have returned false");
       }
     } finally {
       client.close();
@@ -335,10 +336,10 @@
       
       //2nd append should get AlreadyBeingCreatedException
       fs1.append(p);
-      Assert.fail();
+      Assertions.fail();
     } catch(RemoteException re) {
       AppendTestUtil.LOG.info("Got an exception:", re);
-      Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+      Assertions.assertEquals(AlreadyBeingCreatedException.class.getName(),
           re.getClassName());
     } finally {
       fs2.close();
@@ -376,10 +377,10 @@
 
       // 2nd append should get AlreadyBeingCreatedException
       fs1.append(p);
-      Assert.fail();
+      Assertions.fail();
     } catch(RemoteException re) {
       AppendTestUtil.LOG.info("Got an exception:", re);
-      Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+      Assertions.assertEquals(AlreadyBeingCreatedException.class.getName(),
           re.getClassName());
     } finally {
       fs2.close();
@@ -428,13 +429,13 @@
         fileLen += appendLen;
       }
 
-      Assert.assertEquals(fileLen, fs.getFileStatus(p).getLen());
+      Assertions.assertEquals(fileLen, fs.getFileStatus(p).getLen());
       final byte[] actual = new byte[fileLen];
       final FSDataInputStream in = fs.open(p);
       in.readFully(actual);
       in.close();
       for(int i = 0; i < fileLen; i++) {
-        Assert.assertEquals(data[i], actual[i]);
+        Assertions.assertEquals(data[i], actual[i]);
       }
     } finally {
       fs.close();
@@ -675,7 +676,7 @@
       Path fileName = new Path("/appendCorruptBlock");
       DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
       DFSTestUtil.waitReplication(fs, fileName, (short) 1);
-      Assert.assertTrue("File not created", fs.exists(fileName));
+      Assertions.assertTrue(fs.exists(fileName), "File not created");
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
       cluster.corruptBlockOnDataNodes(block);
       DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
@@ -707,7 +708,7 @@
       Path fileName = new Path("/appendCorruptBlock");
       DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
       DFSTestUtil.waitReplication(fs, fileName, (short) 1);
-      Assert.assertTrue("File not created", fs.exists(fileName));
+      Assertions.assertTrue(fs.exists(fileName), "File not created");
 
       // Call FsDatasetImpl#append to append the block file,
       // which converts it to a rbw replica.
@@ -738,7 +739,7 @@
       // checksum, rather than on-disk checksum. Otherwise it will see a
       // checksum mismatch error.
       final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
-      assertEquals("should have read only one byte!", 1, readBlock.length);
+      assertEquals(1, readBlock.length, "should have read only one byte!");
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 9929cb2..2f5cff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -42,7 +39,7 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -445,10 +442,10 @@
             } catch (InterruptedException e) {}
           }
 
-          assertTrue("File " + testfile + " size is " + 
-                     fs.getFileStatus(testfile).getLen() +
-                     " but expected " + (len + sizeToAppend),
-                    fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
+          assertTrue(fs.getFileStatus(testfile).getLen()
+              == (len + sizeToAppend), "File " + testfile + " size is "
+              + fs.getFileStatus(testfile).getLen() + " but expected "
+              + (len + sizeToAppend));
 
           AppendTestUtil.checkFullFile(fs, testfile, (int) (len + sizeToAppend),
               fileContents, "Read 2");
@@ -460,9 +457,9 @@
                                " " + e);
             e.printStackTrace();
           }
-          assertTrue("Workload exception " + id + " testfile " + testfile +
-                     " expected size " + (len + sizeToAppend),
-                     false);
+          assertTrue(false,
+              "Workload exception " + id + " testfile " + testfile +
+              " expected size " + (len + sizeToAppend));
         }
 
         // Add testfile back to the pool of files.
@@ -527,10 +524,10 @@
       cluster.shutdown();
     }
 
-    // If any of the worker thread failed in their job, indicate that
-    // this test failed.
-    //
-    assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
+    // If any of the worker threads failed in their job, indicate that
+    // this test failed.
+    //
+    assertTrue(globalStatus, "testComplexAppend Worker encountered exceptions.");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
index 3e9adca..8d42879 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -46,10 +44,10 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /** This class implements some of tests posted in HADOOP-2658. */
@@ -70,7 +68,7 @@
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws java.lang.Exception {
     AppendTestUtil.LOG.info("setUp()");
     conf = new HdfsConfiguration();
@@ -80,7 +78,7 @@
     fs = cluster.getFileSystem();
   }
    
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     AppendTestUtil.LOG.info("tearDown()");
     if(fs != null) fs.close();
@@ -201,10 +199,10 @@
     AppendTestUtil.check(fs, p, len1 + len2);
     List<LocatedBlock> blocks = fs.getClient().getLocatedBlocks(
         p.toString(), 0L).getLocatedBlocks();
-    Assert.assertEquals(3, blocks.size());
-    Assert.assertEquals(BLOCK_SIZE, blocks.get(0).getBlockSize());
-    Assert.assertEquals(BLOCK_SIZE / 2, blocks.get(1).getBlockSize());
-    Assert.assertEquals(BLOCK_SIZE / 4, blocks.get(2).getBlockSize());
+    Assertions.assertEquals(3, blocks.size());
+    Assertions.assertEquals(BLOCK_SIZE, blocks.get(0).getBlockSize());
+    Assertions.assertEquals(BLOCK_SIZE / 2, blocks.get(1).getBlockSize());
+    Assertions.assertEquals(BLOCK_SIZE / 4, blocks.get(2).getBlockSize());
   }
 
   /**
@@ -429,9 +427,9 @@
     AppendTestUtil.check(fs, p, len1 + len2);
     if (appendToNewBlock) {
       LocatedBlocks blks = fs.dfs.getLocatedBlocks(p.toString(), 0);
-      Assert.assertEquals(2, blks.getLocatedBlocks().size());
-      Assert.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
-      Assert.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
+      Assertions.assertEquals(2, blks.getLocatedBlocks().size());
+      Assertions.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
+      Assertions.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
       AppendTestUtil.check(fs, p, 0, len1);
       AppendTestUtil.check(fs, p, len1, len2);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 8c672b5..8d8a200 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -47,8 +45,8 @@
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 /* File Append tests for HDFS-200 & HDFS-142, specifically focused on:
@@ -72,7 +70,7 @@
     GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     this.conf = new Configuration();
 
@@ -379,7 +377,7 @@
       FSDirectory dir = cluster.getNamesystem().getFSDirectory();
       final INodeFile inode = INodeFile.
           valueOf(dir.getINode("/testAppend"), "/testAppend");
-      assertTrue("File should remain closed", !inode.isUnderConstruction());
+      assertFalse(inode.isUnderConstruction(), "File should remain closed");
     } finally {
       if (null != fileSystem) {
         fileSystem.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
index a2b344c..ca72e91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -38,7 +38,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Unit test to make sure that Append properly logs the right
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index c19d8c3..d583e21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -30,11 +30,11 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -100,7 +100,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     int numDNs = dataBlocks + parityBlocks + 2;
     conf = new Configuration();
@@ -124,7 +124,7 @@
     GenericTestUtils.setLogLevel(FileChecksumHelper.LOG, Level.DEBUG);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -194,9 +194,9 @@
     LOG.info("stripedFileChecksum2:" + stripedFileChecksum2);
     LOG.info("stripedFileChecksum3:" + stripedFileChecksum3);
 
-    Assert.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
+    Assertions.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
     if (range1 >=0 && range1 != range2) {
-      Assert.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
+      Assertions.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
     }
   }
 
@@ -209,9 +209,9 @@
         10, false);
 
     if (checksumCombineMode.equals(ChecksumCombineMode.COMPOSITE_CRC.name())) {
-      Assert.assertEquals(stripedFileChecksum1, replicatedFileChecksum);
+      Assertions.assertEquals(stripedFileChecksum1, replicatedFileChecksum);
     } else {
-      Assert.assertNotEquals(stripedFileChecksum1, replicatedFileChecksum);
+      Assertions.assertNotEquals(stripedFileChecksum1, replicatedFileChecksum);
     }
   }
 
@@ -228,9 +228,9 @@
     FileChecksum checksum2 = getFileChecksum(replicatedFile2, -1, false);
 
     if (checksumCombineMode.equals(ChecksumCombineMode.COMPOSITE_CRC.name())) {
-      Assert.assertEquals(checksum1, checksum2);
+      Assertions.assertEquals(checksum1, checksum2);
     } else {
-      Assert.assertNotEquals(checksum1, checksum2);
+      Assertions.assertNotEquals(checksum1, checksum2);
     }
   }
 
@@ -245,8 +245,8 @@
     LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
     LOG.info("stripedFileChecksumRecon:" + stripedFileChecksumRecon);
 
-    Assert.assertTrue("Checksum mismatches!",
-        stripedFileChecksum1.equals(stripedFileChecksumRecon));
+    Assertions.assertTrue(stripedFileChecksum1.equals(stripedFileChecksumRecon),
+        "Checksum mismatches!");
   }
 
   @Test(timeout = 90000)
@@ -263,12 +263,12 @@
     LOG.info("stripedFileChecksum2:" + stripedFileChecksum1);
     LOG.info("stripedFileChecksum2Recon:" + stripedFileChecksum2Recon);
 
-    Assert.assertTrue("Checksum mismatches!",
-        stripedFileChecksum1.equals(stripedFileChecksum2));
-    Assert.assertTrue("Checksum mismatches!",
-        stripedFileChecksum1.equals(stripedFileChecksum2Recon));
-    Assert.assertTrue("Checksum mismatches!",
-        stripedFileChecksum2.equals(stripedFileChecksum2Recon));
+    Assertions.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2),
+        "Checksum mismatches!");
+    Assertions.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2Recon),
+        "Checksum mismatches!");
+    Assertions.assertTrue(stripedFileChecksum2.equals(stripedFileChecksum2Recon),
+        "Checksum mismatches!");
   }
 
   private void testStripedFileChecksumWithMissedDataBlocksRangeQuery(
@@ -284,8 +284,8 @@
     LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
     LOG.info("stripedFileChecksumRecon:" + stripedFileChecksumRecon);
 
-    Assert.assertTrue("Checksum mismatches!",
-        stripedFileChecksum1.equals(stripedFileChecksumRecon));
+    Assertions.assertTrue(stripedFileChecksum1.equals(stripedFileChecksumRecon),
+        "Checksum mismatches!");
   }
 
   /**
@@ -544,8 +544,8 @@
       // getting result.
       FileChecksum fileChecksum1 = getFileChecksum(stripedFile4, -1, true);
 
-      Assert.assertEquals("checksum should be same", fileChecksum,
-          fileChecksum1);
+      Assertions.assertEquals(fileChecksum, fileChecksum1,
+          "checksum should be same");
     } finally {
       DataNodeFaultInjector.set(oldInjector);
     }
@@ -578,7 +578,7 @@
       DFSTestUtil.writeFile(fs, new Path(replicatedFile2), fileData);
       FileChecksum checksum1 = getFileChecksum(replicatedFile1, -1, false);
       FileChecksum checksum2 = getFileChecksum(replicatedFile2, -1, false);
-      Assert.assertEquals(checksum1, checksum2);
+      Assertions.assertEquals(checksum1, checksum2);
     } else {
       exception.expect(IOException.class);
       FileChecksum checksum = getFileChecksum(replicatedFile1, -1, false);
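
Note (sketch, not part of this patch): the hunks above still lean on two JUnit 4 constructs that the Jupiter engine does not honor, the ExpectedException @Rule and @Test(timeout = 90000) from org.junit.Test. A minimal Jupiter-native sketch of the same checks, with hypothetical method names and a stub standing in for the test's real getFileChecksum helper, could look like this:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    class FileChecksumJupiterSketch {
      // timeout moves from @Test(timeout = 90000) to a dedicated annotation
      @Test
      @Timeout(value = 90, unit = TimeUnit.SECONDS)
      void testStripedFileChecksum() throws Exception {
        // existing test body unchanged
      }

      // replaces: exception.expect(IOException.class); getFileChecksum(...)
      @Test
      void testChecksumForFileUnderConstruction() {
        assertThrows(IOException.class,
            () -> getFileChecksum("/replicatedFileUC", -1, false));
      }

      // stub standing in for the test's real helper
      private Object getFileChecksum(String file, int range, boolean recon) throws IOException {
        throw new IOException("illustration only");
      }
    }
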
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
index 0c7a3fc..e4cad8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
@@ -16,10 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -38,10 +35,10 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -77,13 +74,13 @@
   private FileSystem fileSystem;
 
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     conf = new Configuration();
     init(conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -124,10 +121,10 @@
     IOUtils.readFully(inputStream, buffer, 0, numBytes);
     inputStream.close();
 
-    assertTrue(
-      "unable to validate bytes",
-      validateSequentialBytes(buffer, 0, numBytes)
-    );
+    assertTrue(
+        validateSequentialBytes(buffer, 0, numBytes),
+        "unable to validate bytes"
+    );
   }
 
   private void waitForBlocks(FileSystem fileSys, Path name)
@@ -273,8 +270,8 @@
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
     }
-    
-    assertNull(errorMessage.get(), errorMessage.get());
+
+    assertNull(errorMessage.get(), errorMessage.get());
   }
 
   // for some reason, using tranferTo evokes the race condition more often
@@ -291,7 +288,7 @@
   }
 
   // fails due to issue w/append, disable 
-  @Ignore
+  @Disabled
   public void _testUnfinishedBlockCRCErrorTransferToAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -309,7 +306,7 @@
   }
 
   // fails due to issue w/append, disable 
-  @Ignore
+  @Disabled
   public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -407,9 +404,9 @@
       writer.join();
       tailer.join();
 
-      assertFalse(
-        "error occurred, see log above", error.get()
-      );
+      assertFalse(
+          error.get(),
+          "error occurred, see log above");
     } catch (InterruptedException e) {
       LOG.info("interrupted waiting for writer or tailer to complete");
 
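
Note (sketch, not part of this patch): @Disabled is the Jupiter replacement used above for @Ignore. Unlike the underscore-renamed methods in this file, a Jupiter-style skip keeps @Test on the method and can carry the reason from the nearby comment, e.g.:

    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class ConcurrentReaderSketch {
      @Test
      @Disabled("fails due to issue w/append")
      void testUnfinishedBlockCRCErrorTransferToAppend() throws Exception {
        // body as in the existing _testUnfinishedBlockCRCErrorTransferToAppend()
      }
    }
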
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 381cf16..ea181b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -22,9 +22,7 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -55,7 +53,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.event.Level;
 
@@ -86,7 +84,7 @@
       DataNode dn = cluster.getDataNodes().get(2);
       Map<DatanodeStorage, BlockListAsLongs> blockReports =
           dn.getFSDataset().getBlockReports(bpid);
-      assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
+      assertTrue(!blockReports.isEmpty(), "Blocks do not exist on data-dir");
       for (BlockListAsLongs report : blockReports.values()) {
         for (BlockReportReplica brr : report) {
           LOG.info("Deliberately removing block {}", brr.getBlockName());
@@ -94,8 +92,8 @@
               new ExtendedBlock(bpid, brr)).deleteData();
         }
       }
-      assertTrue("Corrupted replicas not handled properly.",
-                 util.checkFiles(fs, "/srcdat"));
+      assertTrue(util.checkFiles(fs, "/srcdat"),
+          "Corrupted replicas not handled properly.");
       util.cleanup(fs, "/srcdat");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
@@ -146,8 +144,8 @@
       // get the block
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
-      assertFalse("Data directory does not contain any blocks or there was an "
-          + "IO error", blk==null);
+        assertFalse(blk == null, "Data directory does not contain any blocks or there was an "
+                + "IO error");
 
       // start a third datanode
       cluster.startDataNodes(conf, 1, true, null, null);
@@ -197,14 +195,14 @@
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = cluster.getInstanceStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("Data directory does not exist", dataDir.exists());
+      assertTrue(dataDir.exists(), "Data directory does not exist");
       ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
       if (blk == null) {
         blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
       }
-      assertFalse("Data directory does not contain any blocks or there was an" +
-          " " +
-          "IO error", blk == null);
+        assertFalse(blk == null, "Data directory does not contain any blocks or there was an" +
+                " " +
+                "IO error");
       ArrayList<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(datanodes.size(), 3);
       FSNamesystem ns = cluster.getNamesystem();
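
Note (sketch, not part of this patch): the converted null checks above read more directly with Jupiter's assertNotNull, which likewise takes the message as the trailing argument; a minimal illustration:

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import static org.junit.jupiter.api.Assertions.assertNotNull;

    class CorruptionAssertSketch {
      void checkBlockPresent(ExtendedBlock blk) {
        // equivalent to the converted: assertFalse(blk == null, "...")
        assertNotNull(blk,
            "Data directory does not contain any blocks or there was an IO error");
      }
    }
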
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index a7cf68b..b4fc1fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -34,11 +34,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 import static org.mockito.Mockito.doReturn;
 
 import java.io.BufferedReader;
@@ -89,8 +86,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.event.Level;
 
 /**
@@ -349,9 +346,9 @@
       //
       Path path = new Path("/");
       System.out.println("Path : \"" + path.toString() + "\"");
-      System.out.println(fs.getFileStatus(path).isDirectory()); 
-      assertTrue("/ should be a directory", 
-                 fs.getFileStatus(path).isDirectory());
+      System.out.println(fs.getFileStatus(path).isDirectory());
+      assertTrue(fs.getFileStatus(path).isDirectory(),
+          "/ should be a directory");
 
       //
       // Create a directory inside /, then try to overwrite it
@@ -363,7 +360,7 @@
       try {
         fs.create(dir1, true); // Create path, overwrite=true
         fs.close();
-        assertTrue("Did not prevent directory from being overwritten.", false);
+          assertTrue(false, "Did not prevent directory from being overwritten.");
       } catch (FileAlreadyExistsException e) {
         // expected
       }
@@ -378,9 +375,9 @@
       dfs.setQuota(file1.getParent(), 100L, blockSize*5);
       FSDataOutputStream stm = createFile(fs, file1, 1);
 
-      // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
-                 fs.getFileStatus(file1).isFile());
+      // verify that file exists in FS namespace
+      assertTrue(fs.getFileStatus(file1).isFile(),
+          file1 + " should be a file");
       System.out.println("Path : \"" + file1 + "\"");
 
       // write to file
@@ -390,14 +387,14 @@
 
       // verify that file size has changed to the full size
       long len = fs.getFileStatus(file1).getLen();
-      assertTrue(file1 + " should be of size " + fileSize +
-                 " but found to be of size " + len, 
-                  len == fileSize);
+      assertTrue(len == fileSize,
+          file1 + " should be of size " + fileSize +
+          " but found to be of size " + len);
       
       // verify the disk space the file occupied
       long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
-      assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
-          "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
+      assertEquals(fileSize, diskSpace, file1 + " should take " + fileSize
+          + " bytes disk space but found to take " + diskSpace + " bytes");
       
       // Check storage usage 
       // can't check capacities for real storage since the OS file system may be changing under us.
@@ -461,12 +458,12 @@
       fs = cluster.getFileSystem();
       localfs = FileSystem.getLocal(conf);
 
-      assertTrue(file1 + " still exists inspite of deletOnExit set.",
-                 !fs.exists(file1));
-      assertTrue(file2 + " still exists inspite of deletOnExit set.",
-                 !fs.exists(file2));
-      assertTrue(file3 + " still exists inspite of deletOnExit set.",
-                 !localfs.exists(file3));
+      assertTrue(!fs.exists(file1),
+          file1 + " still exists inspite of deletOnExit set.");
+      assertTrue(!fs.exists(file2),
+          file2 + " still exists inspite of deletOnExit set.");
+      assertTrue(!localfs.exists(file3),
+          file3 + " still exists inspite of deletOnExit set.");
       System.out.println("DeleteOnExit successful.");
 
     } finally {
@@ -560,9 +557,9 @@
       Path file1 = new Path("/filestatus.dat");
       FSDataOutputStream stm = createFile(fs, file1, 1);
 
-      // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
-                 fs.getFileStatus(file1).isFile());
+      // verify that file exists in FS namespace
+      assertTrue(fs.getFileStatus(file1).isFile(),
+          file1 + " should be a file");
       System.out.println("Path : \"" + file1 + "\"");
 
       // kill the datanode
@@ -598,8 +595,8 @@
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
-      assertTrue("Error blocks were not cleaned up",
-                 locations.locatedBlockCount() == 0);
+      assertTrue(locations.locatedBlockCount() == 0,
+          "Error blocks were not cleaned up");
     } finally {
       cluster.shutdown();
       client.close();
@@ -734,14 +731,14 @@
       HdfsDataOutputStream stm = create(fs, file1, 1);
       System.out.println("testFileCreationNamenodeRestart: "
                          + "Created file " + file1);
-      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
-          stm.getCurrentBlockReplication());
+      assertEquals(1, stm.getCurrentBlockReplication(),
+          file1 + " should be replicated to 1 datanode.");
 
       // write two full blocks.
       writeFile(stm, numBlocks * blockSize);
       stm.hflush();
-      assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
-          stm.getCurrentBlockReplication());
+      assertEquals(1, stm.getCurrentBlockReplication(),
+          file1 + " should still be replicated to 1 datanode.");
 
       // rename file wile keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
@@ -838,15 +835,15 @@
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
-      assertTrue("Error blocks were not cleaned up for file " + file1,
-                 locations.locatedBlockCount() == 3);
+      assertTrue(locations.locatedBlockCount() == 3,
+          "Error blocks were not cleaned up for file " + file1);
 
       // verify filestatus2.dat
       locations = client.getNamenode().getBlockLocations(
                                   file2.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
-      assertTrue("Error blocks were not cleaned up for file " + file2,
-                 locations.locatedBlockCount() == 1);
+      assertTrue(locations.locatedBlockCount() == 1,
+          "Error blocks were not cleaned up for file " + file2);
     } finally {
       IOUtils.closeStream(fs);
       cluster.shutdown();
@@ -882,9 +879,9 @@
       // This should close all existing file.
       dfsclient.close();
 
-      // reopen file system and verify that file exists.
-      assertTrue(file1 + " does not exist.", 
-          AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
+      // reopen file system and verify that file exists.
+      assertTrue(AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1),
+          file1 + " does not exist.");
     } finally {
       cluster.shutdown();
     }
@@ -926,19 +923,19 @@
     // Create a file when parent dir exists as file, should fail
     expectedException = createNonRecursive(fs, new Path(path, "Create"), 1, createFlag);
 
-    assertTrue("Create a file when parent directory exists as a file"
-        + " should throw ParentNotDirectoryException ",
-        expectedException != null
-            && expectedException instanceof ParentNotDirectoryException);
+    assertTrue(expectedException != null
+            && expectedException instanceof ParentNotDirectoryException,
+        "Create a file when parent directory exists as a file"
+            + " should throw ParentNotDirectoryException ");
     fs.delete(path, true);
     // Create a file in a non-exist directory, should fail
     final Path path2 = new Path(nonExistDir + "/testCreateNonRecursive");
     expectedException =  createNonRecursive(fs, path2, 1, createFlag);
 
-    assertTrue("Create a file in a non-exist dir using"
-        + " createNonRecursive() should throw FileNotFoundException ",
-        expectedException != null
-            && expectedException instanceof FileNotFoundException);
+    assertTrue(expectedException != null
+            && expectedException instanceof FileNotFoundException,
+        "Create a file in a non-exist dir using"
+            + " createNonRecursive() should throw FileNotFoundException ");
 
     EnumSet<CreateFlag> overwriteFlag =
       EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
@@ -948,20 +945,20 @@
     // Overwrite a file when parent dir exists as file, should fail
     expectedException = createNonRecursive(fs, new Path(path, "Overwrite"), 1, overwriteFlag);
 
-    assertTrue("Overwrite a file when parent directory exists as a file"
-        + " should throw ParentNotDirectoryException ",
-        expectedException != null
-            && expectedException instanceof ParentNotDirectoryException);
+    assertTrue(expectedException != null
+            && expectedException instanceof ParentNotDirectoryException,
+        "Overwrite a file when parent directory exists as a file"
+            + " should throw ParentNotDirectoryException ");
     fs.delete(path, true);
 
     // Overwrite a file in a non-exist directory, should fail
     final Path path3 = new Path(nonExistDir + "/testOverwriteNonRecursive");
     expectedException = createNonRecursive(fs, path3, 1, overwriteFlag);
 
-    assertTrue("Overwrite a file in a non-exist dir using"
-        + " createNonRecursive() should throw FileNotFoundException ",
-        expectedException != null
-            && expectedException instanceof FileNotFoundException);
+    assertTrue(expectedException != null
+            && expectedException instanceof FileNotFoundException,
+        "Overwrite a file in a non-exist dir using"
+            + " createNonRecursive() should throw FileNotFoundException ");
   }
 
   // Attempts to create and close a file using FileSystem.createNonRecursive(),
@@ -1090,8 +1087,8 @@
       out.write("something".getBytes());
       out.hflush();
       int actualRepl = out.getCurrentBlockReplication();
-      assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
-                 actualRepl == DATANODE_NUM);
+      assertTrue(actualRepl == DATANODE_NUM,
+          f + " should be replicated to " + DATANODE_NUM + " datanodes.");
 
       // set the soft and hard limit to be 1 second so that the
       // namenode triggers lease recovery
@@ -1190,7 +1187,7 @@
       } catch (IOException e) {
         hasException = true;
       }
-      assertTrue("Failed to close file after cluster shutdown", hasException);
+      assertTrue(hasException, "Failed to close file after cluster shutdown");
     } finally {
       System.out.println("testFsCloseAfterClusterShutdown successful");
       if (cluster != null) {
@@ -1376,7 +1373,7 @@
       } finally {
         in.close();
       }
-      Assert.assertArrayEquals(newData, result);
+      Assertions.assertArrayEquals(newData, result);
       
       // Case 2: Restart NN, check the file
       cluster.restartNameNode();
@@ -1387,7 +1384,7 @@
       } finally {
         in.close();
       }
-      Assert.assertArrayEquals(newData, result);
+      Assertions.assertArrayEquals(newData, result);
       
       // Case 3: Save new checkpoint and restart NN, check the file
       NameNodeAdapter.enterSafeMode(nn, false);
@@ -1401,7 +1398,7 @@
       } finally {
         in.close();
       }
-      Assert.assertArrayEquals(newData, result);
+      Assertions.assertArrayEquals(newData, result);
     } finally {
       if (dfs != null) {
         dfs.close();
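
Note (sketch, not part of this patch): where the converted assertions above build their messages by string concatenation, Jupiter also accepts a Supplier<String> as the last argument so the message is only built when the assertion fails; a small illustration with hypothetical names:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import static org.junit.jupiter.api.Assertions.assertEquals;

    class LazyMessageSketch {
      void checkLength(FileSystem fs, Path file1, long fileSize) throws Exception {
        long len = fs.getFileStatus(file1).getLen();
        // the message supplier is evaluated only if the assertion fails
        assertEquals(fileSize, len,
            () -> file1 + " should be of size " + fileSize + " but found to be of size " + len);
      }
    }
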
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
index 986bb56..41b5b34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
index 728fa75..77ff225 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -24,7 +24,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 public class TestFileCreationDelete {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
index fbada20..2373ea5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
@@ -16,14 +16,14 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.util.ConcurrentModificationException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test empty file creation.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
index c8420ca..5bd80e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
@@ -23,8 +23,8 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /** Test the fileLength on cluster restarts */
 public class TestFileLengthOnClusterRestart {
@@ -55,7 +55,7 @@
       in = (HdfsDataInputStream) dfs.open(path, 1024);
       // Verify the length when we just restart NN. DNs will register
       // immediately.
-      Assert.assertEquals(fileLength, in.getVisibleLength());
+      Assertions.assertEquals(fileLength, in.getVisibleLength());
       cluster.shutdownDataNodes();
       cluster.restartNameNode(false);
       // This is just for ensuring NN started.
@@ -63,9 +63,9 @@
 
       try {
         in = (HdfsDataInputStream) dfs.open(path);
-        Assert.fail("Expected IOException");
+        Assertions.fail("Expected IOException");
       } catch (IOException e) {
-        Assert.assertTrue(e.getLocalizedMessage().indexOf(
+        Assertions.assertTrue(e.getLocalizedMessage().indexOf(
             "Name node is in safe mode") >= 0);
       }
     } finally {
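
Note (sketch, not part of this patch): Hadoop's own LambdaTestUtils.intercept can replace the open/fail/catch pattern above while keeping the message check; the names below mirror the test's fields but are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.test.LambdaTestUtils;

    class SafeModeOpenSketch {
      void expectSafeModeOnOpen(DistributedFileSystem dfs, Path path) throws Exception {
        // fails the test if no IOException is raised or the message does not contain the text
        LambdaTestUtils.intercept(IOException.class, "Name node is in safe mode",
            () -> dfs.open(path));
      }
    }
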
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index a5f8911..cfa83d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -38,16 +35,16 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
  * This class tests the FileStatus API.
  */
 public class TestFileStatus {
-  {
+  static {
     GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE);
     GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
   }
@@ -63,7 +60,7 @@
   private static DFSClient dfsClient;
   private static Path file1;
   
-  @BeforeClass
+  @BeforeAll
   public static void testSetUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
@@ -76,7 +73,7 @@
         seed);
   }
   
-  @AfterClass
+  @AfterAll
   public static void testTearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -96,13 +93,13 @@
   public void testGetFileInfo() throws IOException {
     // Check that / exists
     Path path = new Path("/");
-    assertTrue("/ should be a directory", 
-               fs.getFileStatus(path).isDirectory());
+    assertTrue(fs.getFileStatus(path).isDirectory(),
+        "/ should be a directory");
     ContractTestUtils.assertNotErasureCoded(fs, path);
 
     // Make sure getFileInfo returns null for files which do not exist
     HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
-    assertEquals("Non-existant file should result in null", null, fileInfo);
+    assertNull(fileInfo, "Non-existant file should result in null");
     
     Path path1 = new Path("/name1");
     Path path2 = new Path("/name1/name2");
@@ -119,8 +116,8 @@
       dfsClient.getFileInfo("non-absolute");
       fail("getFileInfo for a non-absolute path did not throw IOException");
     } catch (RemoteException re) {
-      assertTrue("Wrong exception for invalid file name: "+re,
-          re.toString().contains("Absolute path required"));
+      assertTrue(re.toString().contains("Absolute path required"),
+          "Wrong exception for invalid file name: " + re);
     }
   }
 
@@ -131,7 +128,7 @@
     checkFile(fs, file1, 1);
     // test getFileStatus on a file
     FileStatus status = fs.getFileStatus(file1);
-    assertFalse(file1 + " should be a file", status.isDirectory());
+    assertFalse(status.isDirectory(), file1 + " should be a file");
     assertEquals(blockSize, status.getBlockSize());
     assertEquals(1, status.getReplication());
     assertEquals(fileSize, status.getLen());
@@ -139,9 +136,9 @@
     assertEquals(file1.makeQualified(fs.getUri(),
         fs.getWorkingDirectory()).toString(), 
         status.getPath().toString());
-    assertTrue(file1 + " should have erasure coding unset in " +
-            "FileStatus#toString(): " + status,
-        status.toString().contains("isErasureCoded=false"));
+    assertTrue(status.toString().contains("isErasureCoded=false"),
+        file1 + " should have erasure coding unset in " +
+        "FileStatus#toString(): " + status);
   }
 
   /** Test the FileStatus obtained calling listStatus on a file */
@@ -150,7 +147,7 @@
     FileStatus[] stats = fs.listStatus(file1);
     assertEquals(1, stats.length);
     FileStatus status = stats[0];
-    assertFalse(file1 + " should be a file", status.isDirectory());
+    assertFalse(status.isDirectory(), file1 + " should be a file");
     assertEquals(blockSize, status.getBlockSize());
     assertEquals(1, status.getReplication());
     assertEquals(fileSize, status.getLen());
@@ -162,7 +159,7 @@
     RemoteIterator<FileStatus> itor = fc.listStatus(file1);
     status = itor.next();
     assertEquals(stats[0], status);
-    assertFalse(file1 + " should be a file", status.isDirectory());
+    assertFalse(status.isDirectory(), file1 + " should be a file");
   }
 
   /** Test getting a FileStatus object using a non-existant path */
@@ -186,8 +183,8 @@
       fs.getFileStatus(dir);
       fail("getFileStatus of non-existent path should fail");
     } catch (FileNotFoundException fe) {
-      assertTrue("Exception doesn't indicate non-existant path", 
-          fe.getMessage().startsWith("File does not exist"));
+      assertTrue(fe.getMessage().startsWith("File does not exist"),
+          "Exception doesn't indicate non-existant path");
     }
   }
 
@@ -196,13 +193,13 @@
   public void testGetFileStatusOnDir() throws Exception {
     // Create the directory
     Path dir = new Path("/test/mkdirs");
-    assertTrue("mkdir failed", fs.mkdirs(dir));
-    assertTrue("mkdir failed", fs.exists(dir));
+    assertTrue(fs.mkdirs(dir), "mkdir failed");
+    assertTrue(fs.exists(dir), "mkdir failed");
     
     // test getFileStatus on an empty directory
     FileStatus status = fs.getFileStatus(dir);
-    assertTrue(dir + " should be a directory", status.isDirectory());
-    assertTrue(dir + " should be zero size ", status.getLen() == 0);
+    assertTrue(status.isDirectory(), dir + " should be a directory");
+    assertEquals(0, status.getLen(), dir + " should be zero size ");
     ContractTestUtils.assertNotErasureCoded(fs, dir);
     assertEquals(dir.makeQualified(fs.getUri(),
         fs.getWorkingDirectory()).toString(), 
@@ -210,15 +207,15 @@
     
     // test listStatus on an empty directory
     FileStatus[] stats = fs.listStatus(dir);
-    assertEquals(dir + " should be empty", 0, stats.length);
-    assertEquals(dir + " should be zero size ",
-        0, fs.getContentSummary(dir).getLength());
+    assertEquals(0, stats.length, dir + " should be empty");
+    assertEquals(0, fs.getContentSummary(dir).getLength(),
+        dir + " should be zero size ");
     
     RemoteIterator<FileStatus> itor = fc.listStatus(dir);
-    assertFalse(dir + " should be empty", itor.hasNext());
+    assertFalse(itor.hasNext(), dir + " should be empty");
 
     itor = fs.listStatusIterator(dir);
-    assertFalse(dir + " should be empty", itor.hasNext());
+    assertFalse(itor.hasNext(), dir + " should be empty");
 
     // create another file that is smaller than a block.
     Path file2 = new Path(dir, "filestatus2.dat");
@@ -242,25 +239,25 @@
 
     // Verify that the size of the directory increased by the size 
     // of the two files
-    final int expected = blockSize/2;  
-    assertEquals(dir + " size should be " + expected, 
-        expected, fs.getContentSummary(dir).getLength());
+    final int expected = blockSize/2;
+    assertEquals(expected, fs.getContentSummary(dir).getLength(),
+        dir + " size should be " + expected);
 
     // Test listStatus on a non-empty directory
     stats = fs.listStatus(dir);
-    assertEquals(dir + " should have two entries", 2, stats.length);
+    assertEquals(2, stats.length, dir + " should have two entries");
     assertEquals(file2.toString(), stats[0].getPath().toString());
     assertEquals(file3.toString(), stats[1].getPath().toString());
 
     itor = fc.listStatus(dir);
     assertEquals(file2.toString(), itor.next().getPath().toString());
     assertEquals(file3.toString(), itor.next().getPath().toString());
-    assertFalse("Unexpected addtional file", itor.hasNext());
+    assertFalse(itor.hasNext(), "Unexpected addtional file");
 
     itor = fs.listStatusIterator(dir);
     assertEquals(file2.toString(), itor.next().getPath().toString());
     assertEquals(file3.toString(), itor.next().getPath().toString());
-    assertFalse("Unexpected addtional file", itor.hasNext());
+    assertFalse(itor.hasNext(), "Unexpected addtional file");
 
 
     // Test iterative listing. Now dir has 2 entries, create one more.
@@ -268,7 +265,7 @@
     fs.mkdirs(dir3);
     dir3 = fs.makeQualified(dir3);
     stats = fs.listStatus(dir);
-    assertEquals(dir + " should have three entries", 3, stats.length);
+    assertEquals(3, stats.length, dir + " should have three entries");
     assertEquals(dir3.toString(), stats[0].getPath().toString());
     assertEquals(file2.toString(), stats[1].getPath().toString());
     assertEquals(file3.toString(), stats[2].getPath().toString());
@@ -277,13 +274,13 @@
     assertEquals(dir3.toString(), itor.next().getPath().toString());
     assertEquals(file2.toString(), itor.next().getPath().toString());
     assertEquals(file3.toString(), itor.next().getPath().toString());
-    assertFalse("Unexpected addtional file", itor.hasNext());
+    assertFalse(itor.hasNext(), "Unexpected addtional file");
 
     itor = fs.listStatusIterator(dir);
     assertEquals(dir3.toString(), itor.next().getPath().toString());
     assertEquals(file2.toString(), itor.next().getPath().toString());
     assertEquals(file3.toString(), itor.next().getPath().toString());
-    assertFalse("Unexpected addtional file", itor.hasNext());
+    assertFalse(itor.hasNext(), "Unexpected addtional file");
 
     // Now dir has 3 entries, create two more
     Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
@@ -293,7 +290,7 @@
     fs.mkdirs(dir5);
     dir5 = fs.makeQualified(dir5);
     stats = fs.listStatus(dir);
-    assertEquals(dir + " should have five entries", 5, stats.length);
+    assertEquals(5, stats.length, dir + " should have five entries");
     assertEquals(dir3.toString(), stats[0].getPath().toString());
     assertEquals(dir4.toString(), stats[1].getPath().toString());
     assertEquals(dir5.toString(), stats[2].getPath().toString());
@@ -325,7 +322,7 @@
     try {
       itor.hasNext();
       fail("FileNotFoundException expected");
-    } catch (FileNotFoundException fnfe) {
+    } catch (FileNotFoundException ignored) {
     }
 
     fs.mkdirs(file2);
@@ -340,7 +337,7 @@
         count++;
       }
       fail("FileNotFoundException expected");
-    } catch (FileNotFoundException fnfe) {
+    } catch (FileNotFoundException ignored) {
     }
     assertEquals(2, count);
   }
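
Note (sketch, not part of this patch): the instance initializer above becomes a static block because Jupiter's @BeforeAll/@AfterAll methods must be static by default (without @TestInstance(Lifecycle.PER_CLASS)), so any one-time setup they rely on has to be class-level as well; the shape in outline:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;

    class ClassLifecycleSketch {
      static {
        // one-time log-level configuration runs when the test class loads
      }

      @BeforeAll
      static void startCluster() throws Exception {
        // start the shared MiniDFSCluster once for all tests
      }

      @AfterAll
      static void stopCluster() {
        // shut the shared cluster down
      }
    }
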
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
index 444c0ec..9077eb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusSerialization.java
@@ -35,9 +35,9 @@
 import org.apache.hadoop.io.DataOutputBuffer;
 
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
+import org.junit.jupiter.api.Test;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify compatible FileStatus/HdfsFileStatus serialization.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
index a57777a..3066d39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
@@ -17,23 +17,24 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 /**
  * This test ensures the statuses of EC files with the default policy.
  */
@@ -45,7 +46,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void before() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     cluster =
@@ -56,7 +57,7 @@
     fs.enableErasureCodingPolicy(getEcPolicy().getName());
   }
 
-  @After
+  @AfterEach
   public void after() {
     if (cluster != null) {
       cluster.shutdown();
@@ -89,7 +90,7 @@
     final ErasureCodingPolicy ecPolicy2 =
         client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
     assertNotNull(ecPolicy2);
-    assertTrue(ecPolicy1.equals(ecPolicy2));
+    assertEquals(ecPolicy1, ecPolicy2);
 
     // test file with EC policy
     fs.create(file).close();
@@ -97,11 +98,11 @@
         fs.getClient().getFileInfo(file.toUri().getPath())
             .getErasureCodingPolicy();
     assertNotNull(ecPolicy3);
-    assertTrue(ecPolicy1.equals(ecPolicy3));
+    assertEquals(ecPolicy1, ecPolicy3);
     ContractTestUtils.assertErasureCoded(fs, file);
     FileStatus status = fs.getFileStatus(file);
-    assertTrue(file + " should have erasure coding set in " +
-            "FileStatus#toString(): " + status,
-        status.toString().contains("isErasureCoded=true"));
+    assertTrue(status.toString().contains("isErasureCoded=true"),
+        file + " should have erasure coding set in "
+            + "FileStatus#toString(): " + status);
   }
 }
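
Note (sketch, not part of this patch): this class still registers org.junit.rules.Timeout through @Rule, which the Jupiter engine does not apply; the Jupiter-native equivalent is a class-level @Timeout, e.g.:

    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    @Timeout(value = 300, unit = TimeUnit.SECONDS)
    class ECPolicyStatusSketch {
      @Test
      void testFileStatusWithECPolicy() throws Exception {
        // every @Test method in the class runs under the 300 second limit
      }
    }
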
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
index 7aa9f23..0811f0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -37,7 +37,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test covers privilege related aspects of FsShell
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 1ee166e..84e7be4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.net.InetSocketAddress;
 import java.util.Collection;
@@ -56,8 +56,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.LambdaTestUtils;
-
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -118,8 +117,8 @@
     List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
         .getNamesystem().getBlockManager().getDatanodeManager()
         .getDatanodeListForReport(DatanodeReportType.LIVE);
-    assertEquals("Unexpected number of datanodes", NUM_DATA_NODES,
-        nodeInfoList.size());
+    assertEquals(NUM_DATA_NODES, nodeInfoList.size(),
+        "Unexpected number of datanodes");
     FileSystem fileSys = cluster.getFileSystem();
     FSDataOutputStream stm = null;
     try {
@@ -323,18 +322,18 @@
     String dId = cluster.getDataNodes().get(0).getDatanodeUuid();
     DatanodeDescriptor dnd = BlockManagerTestUtil.getDatanode(ns, dId);
     DatanodeStorageInfo[] storages = dnd.getStorageInfos();
-    assertEquals("DataNode should have 4 storages", 4, storages.length);
+    assertEquals(4, storages.length, "DataNode should have 4 storages");
 
     Iterator<BlockInfo> dnBlockIt = null;
     // check illegal start block number
     try {
       dnBlockIt = BlockManagerTestUtil.getBlockIterator(
           cluster.getNamesystem(), dId, -1);
-      assertTrue("Should throw IllegalArgumentException", false);
+        assertTrue(false, "Should throw IllegalArgumentException");
     } catch(IllegalArgumentException ei) {
       // as expected
     }
-    assertNull("Iterator should be null", dnBlockIt);
+    assertNull(dnBlockIt, "Iterator should be null");
 
     // form an array of all DataNode blocks
     int numBlocks = dnd.numBlocks();
@@ -347,8 +346,8 @@
         allBlocks[idx++] = storageBlockIt.next();
         try {
           storageBlockIt.remove();
-          assertTrue(
-              "BlockInfo iterator should have been unmodifiable", false);
+          assertTrue(false,
+              "BlockInfo iterator should have been unmodifiable");
         } catch (UnsupportedOperationException e) {
           //expected exception
         }
@@ -359,17 +358,17 @@
     for(int i = 0; i < allBlocks.length; i++) {
       // create iterator starting from i
       dnBlockIt = BlockManagerTestUtil.getBlockIterator(ns, dId, i);
-      assertTrue("Block iterator should have next block", dnBlockIt.hasNext());
+      assertTrue(dnBlockIt.hasNext(), "Block iterator should have next block");
       // check iterator lists blocks in the desired order
       for(int j = i; j < allBlocks.length; j++) {
-        assertEquals("Wrong block order", allBlocks[j], dnBlockIt.next());
+        assertEquals(allBlocks[j], dnBlockIt.next(), "Wrong block order");
       }
     }
 
     // check start block number larger than numBlocks in the DataNode
     dnBlockIt = BlockManagerTestUtil.getBlockIterator(
         ns, dId, allBlocks.length + 1);
-    assertFalse("Iterator should not have next block", dnBlockIt.hasNext());
+    assertFalse(dnBlockIt.hasNext(), "Iterator should not have next block");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
index cf61e84..ae569b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -25,10 +25,10 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestGetFileChecksum {
   private static final int BLOCKSIZE = 1024;
@@ -38,7 +38,7 @@
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -48,7 +48,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -69,7 +69,7 @@
 
     for (int i = 0; i < appendRounds + 1; i++) {
       FileChecksum checksum = dfs.getFileChecksum(foo, appendLength * (i+1));
-      Assert.assertTrue(checksum.equals(fc[i]));
+      Assertions.assertTrue(checksum.equals(fc[i]));
     }
   }
 
@@ -82,7 +82,7 @@
       fail("getFileChecksum should fail for files "
           + "with blocks under construction");
     } catch (IOException ie) {
-      Assert.assertTrue(ie.getMessage().contains(
+      Assertions.assertTrue(ie.getMessage().contains(
           "Fail to get checksum, since file /testFile "
               + "is under construction."));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
index 45ccefa..2e888a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
@@ -23,13 +23,13 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
index 6da46de..04699ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
@@ -27,16 +27,16 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   
   private MiniDFSCluster cluster;
   private String defaultWorkingDirectory;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -49,7 +49,7 @@
            UserGroupInformation.getCurrentUser().getShortUserName();
   }
   
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
index 0a74a2d..394036e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -34,9 +35,9 @@
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.util.Sets;
 
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.TestName;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -74,7 +75,7 @@
 
   private final Class<?> rpcServerClass;
 
-  @BeforeClass
+  @BeforeAll
   public static void initialize() {
     Service[] services = new HDFSPolicyProvider().getServices();
     policyProviderProtocols = new HashSet<>(services.length);
@@ -109,15 +110,15 @@
     LOG.info("Running test {} for RPC server {}.  Found server protocols {} "
         + "and policy provider protocols {}.", testName.getMethodName(),
         rpcServerClass.getName(), serverProtocols, policyProviderProtocols);
-    assertFalse("Expected to find at least one protocol in server.",
-        serverProtocols.isEmpty());
+    assertFalse(serverProtocols.isEmpty(),
+        "Expected to find at least one protocol in server.");
     final Set<Class<?>> differenceSet =
         Sets.difference(serverProtocols, policyProviderProtocols);
-    assertTrue(
-        String.format("Following protocols for server %s are not defined in "
-            + "%s: %s",
-            rpcServerClass.getName(), HDFSPolicyProvider.class.getName(),
-            Arrays.toString(differenceSet.toArray())),
-        differenceSet.isEmpty());
+    assertTrue(
+        differenceSet.isEmpty(),
+        String.format("Following protocols for server %s are not defined in "
+            + "%s: %s",
+            rpcServerClass.getName(), HDFSPolicyProvider.class.getName(),
+            Arrays.toString(differenceSet.toArray())));
   }
 }
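
Note (sketch, not part of this patch): TestHDFSPolicyProvider still runs through the JUnit 4 Parameterized runner (@RunWith remains in the imports above). A Jupiter-native shape, assuming junit-jupiter-params is on the test classpath and with an illustrative parameter list, would be:

    import java.util.stream.Stream;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    class PolicyProviderSketch {
      static Stream<Class<?>> rpcServerClasses() {
        // one entry per RPC server class under test; NameNodeRpcServer shown as an example
        return Stream.of(NameNodeRpcServer.class);
      }

      @ParameterizedTest
      @MethodSource("rpcServerClasses")
      void testPolicyProviderForServer(Class<?> rpcServerClass) {
        // same body as the constructor-injected JUnit 4 test, parameter passed per invocation
      }
    }
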
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index c26c648..8d3ea38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -36,8 +36,8 @@
 import java.net.UnknownHostException;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * This test checks correctness of port usage by hdfs components:
@@ -280,7 +280,7 @@
       started = canStartNameNode(conf2);
 
       if (withService) {
-        assertFalse("Should've failed on service port", started);
+        assertFalse(started, "Should've failed on service port");
 
         // reset conf2 since NameNode modifies it
         FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
@@ -379,8 +379,8 @@
         LOG.info("= Starting 1 on: " + backup_config.get(
             DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
-        assertFalse("Backup started on same port as Namenode", 
-                           canStartBackupNode(backup_config)); // should fail
+        assertFalse(canStartBackupNode(backup_config),
+            "Backup started on same port as Namenode"); // should fail
 
         // reset namenode backup address because Windows does not release
         // port used previously properly.
@@ -394,7 +394,7 @@
             DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         boolean started = canStartBackupNode(backup_config);
-        assertTrue("Backup Namenode should've started", started); // should start now
+        assertTrue(started, "Backup Namenode should've started"); // should start now
       } finally {
         stopNameNode(nn);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
index 5dbb124..3b922bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.UUID;
@@ -36,9 +34,9 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 /**
@@ -65,7 +63,7 @@
   private static UserGroupInformation user1;
   private static UserGroupInformation user2;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fs = FileSystem.get(conf);
@@ -92,7 +90,7 @@
         null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) { cluster.shutdown(); }
   }
@@ -141,13 +139,13 @@
     fs.mkdirs(user1Tmp);
     Trash u1Trash = getPerUserTrash(user1, fs, testConf);
     Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
-    assertTrue(String.format("Failed to move %s to trash", user1Tmp),
-        u1Trash.moveToTrash(user1Tmp));
-    assertTrue(
-        String.format(
-            "%s should be allowed to remove its own trash directory %s",
-            user1.getUserName(), u1t),
-        fs.delete(u1t, true));
+    assertTrue(u1Trash.moveToTrash(user1Tmp),
+        String.format("Failed to move %s to trash", user1Tmp));
+    assertTrue(
+        fs.delete(u1t, true),
+        String.format(
+            "%s should be allowed to remove its own trash directory %s",
+            user1.getUserName(), u1t));
     assertFalse(fs.exists(u1t));
 
     // login as user2, move something to trash
@@ -165,8 +163,8 @@
               USER1_NAME, USER2_NAME));
     } catch (AccessControlException e) {
       assertTrue(e instanceof AccessControlException);
-      assertTrue("Permission denied messages must carry the username",
-          e.getMessage().contains(USER1_NAME));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
index 711291c..8844c4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -35,7 +33,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /** Class contains a set of tests to verify the correctness of 
@@ -337,9 +335,9 @@
         // Check file length if updatelength is required
         if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
           long currentFileLength = fileSystem.getFileStatus(path).getLen();
-          assertEquals(
-            "File size doesn't match for hsync/hflush with updating the length",
-            tenth * (i + 1), currentFileLength);
+          assertEquals(
+              tenth * (i + 1), currentFileLength,
+              "File size doesn't match for hsync/hflush with updating the length");
         } else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
           LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
           assertEquals(i + 1, blocks.getLocatedBlocks().size());
@@ -353,7 +351,7 @@
         is.seek(tenth * i);
         int readBytes = is.read(toRead, 0, tenth);
         System.out.println("Has read " + readBytes);
-        assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
+        assertTrue((readBytes > 0) && (readBytes <= tenth), "Should have read some bytes");
         is.close();
         checkData(toRead, 0, readBytes, expected, "Partial verification");
       }
@@ -361,7 +359,7 @@
       stm.write(fileContent, tenth * SECTIONS, rounding);
       stm.close();
 
-      assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
+      assertEquals(AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen(), "File size doesn't match ");
       AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
     } finally {
       fileSystem.close();
@@ -371,9 +369,9 @@
   static void checkData(final byte[] actual, int from, int len,
                         final byte[] expected, String message) {
     for (int idx = 0; idx < len; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
+      assertEquals(expected[from + idx], actual[idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index a7afa66..51ab312 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -46,10 +44,10 @@
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.util.Sets;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestHdfsAdmin {
   
@@ -60,7 +58,7 @@
   private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUpCluster() throws IOException {
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES,
@@ -69,7 +67,7 @@
     cluster.waitActive();
   }
   
-  @After
+  @AfterEach
   public void shutDownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -182,9 +180,9 @@
       policyNamesSet2.add(policy.getName());
     }
     // Ensure that we got the same set of policies in both cases.
-    Assert.assertTrue(
+    Assertions.assertTrue(
         Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
-    Assert.assertTrue(
+    Assertions.assertTrue(
         Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
   }
 
@@ -200,8 +198,8 @@
   @Test
   public void testGetKeyProvider() throws IOException {
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
-    Assert.assertNull("should return null for an non-encrypted cluster",
-        hdfsAdmin.getKeyProvider());
+    Assertions.assertNull(hdfsAdmin.getKeyProvider(),
+        "should return null for a non-encrypted cluster");
 
     shutDownCluster();
 
@@ -213,8 +211,8 @@
     cluster.waitActive();
     hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
 
-    Assert.assertNotNull("should not return null for an encrypted cluster",
-        hdfsAdmin.getKeyProvider());
+    Assertions.assertNotNull(hdfsAdmin.getKeyProvider(),
+        "should not return null for an encrypted cluster");
   }
 
   @Test(timeout = 120000L)
@@ -260,10 +258,10 @@
       HashSet<Path> openFiles) throws IOException {
     while (openFilesRemoteItr.hasNext()) {
       String filePath = openFilesRemoteItr.next().getFilePath();
-      assertFalse(filePath + " should not be listed under open files!",
-          closedFiles.contains(new Path(filePath)));
-      assertTrue(filePath + " is not listed under open files!",
-          openFiles.remove(new Path(filePath)));
+      assertFalse(closedFiles.contains(new Path(filePath)),
+          filePath + " should not be listed under open files!");
+      assertTrue(openFiles.remove(new Path(filePath)),
+          filePath + " is not listed under open files!");
     }
   }
 
@@ -275,7 +273,7 @@
         hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
             OpenFilesIterator.FILTER_PATH_DEFAULT);
     verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
-    assertTrue("Not all open files are listed!", openFiles.isEmpty());
+    assertTrue(openFiles.isEmpty(), "Not all open files are listed!");
   }
 
   /**
@@ -289,6 +287,6 @@
     RemoteIterator<OpenFileEntry> openFilesRemoteItr =
         hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
     verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
-    assertTrue("Not all open files are listed!", openFiles.isEmpty());
+    assertTrue(openFiles.isEmpty(), "Not all open files are listed!");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index a8affa2..d8893db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -38,7 +38,7 @@
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.Time;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
index 2e4a08b..554326b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -45,9 +45,9 @@
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test cases to verify that client side translators correctly implement the
@@ -59,7 +59,7 @@
   private static InetSocketAddress nnAddress = null;
   private static InetSocketAddress dnAddress = null;
   
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     cluster = (new MiniDFSCluster.Builder(conf))
         .numDataNodes(1).build();
@@ -69,7 +69,7 @@
                                       dn.getIpcPort());
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
index 9fc6b38..cf3d9bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
@@ -25,8 +25,8 @@
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 public class TestKeyProviderCache {
 
@@ -98,31 +98,31 @@
         "dummy://foo:bar@test_provider1");
     KeyProvider keyProvider1 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
-    Assert.assertNotNull("Returned Key Provider is null !!", keyProvider1);
+    Assertions.assertNotNull(keyProvider1, "Returned Key Provider is null !!");
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://foo:bar@test_provider1");
     KeyProvider keyProvider2 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
 
-    Assert.assertTrue("Different KeyProviders returned !!",
-        keyProvider1 == keyProvider2);
+    Assertions.assertTrue(keyProvider1 == keyProvider2,
+        "Different KeyProviders returned !!");
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://test_provider3");
     KeyProvider keyProvider3 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
 
-    Assert.assertFalse("Same KeyProviders returned !!",
-        keyProvider1 == keyProvider3);
+    Assertions.assertFalse(keyProvider1 == keyProvider3,
+        "Same KeyProviders returned !!");
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://hello:there@test_provider1");
     KeyProvider keyProvider4 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
 
-    Assert.assertFalse("Same KeyProviders returned !!",
-        keyProvider1 == keyProvider4);
+    Assertions.assertFalse(keyProvider1 == keyProvider4,
+        "Same KeyProviders returned !!");
 
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index 137571c..38d160a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -135,13 +135,13 @@
       if (verifyData) {
         // verify data read
         if (thisread == readSize) {
-          assertTrue("file is corrupted at or after byte " +
-              (fileSize - bytesToRead), Arrays.equals(b, compb));
+          assertTrue(Arrays.equals(b, compb),
+              "file is corrupted at or after byte " + (fileSize - bytesToRead));
         } else {
           // b was only partially filled by last read
           for (int k = 0; k < thisread; k++) {
-            assertTrue("file is corrupted at or after byte " +
-                (fileSize - bytesToRead), b[k] == compb[k]);
+            assertTrue(b[k] == compb[k],
+                "file is corrupted at or after byte " + (fileSize - bytesToRead));
           }
         }
       }
@@ -189,9 +189,9 @@
           fileSize +
           " blocksize " + blockSize);
 
-      // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
-                  fs.getFileStatus(file1).isFile());
+      // verify that file exists in FS namespace
+      assertTrue(fs.getFileStatus(file1).isFile(),
+          file1 + " should be a file");
 
       // write to file
       writeFile(stm, fileSize);
@@ -206,9 +206,9 @@
 
       // verify that file size has changed
       long len = fs.getFileStatus(file1).getLen();
-      assertTrue(file1 + " should be of size " +  fileSize +
-                 " but found to be of size " + len, 
-                  len == fileSize);
+      assertTrue(len == fileSize,
+          file1 + " should be of size " + fileSize +
+          " but found to be of size " + len);
 
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 5d7b62a..28bae9a 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -50,8 +50,8 @@
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 public class TestLease {
@@ -108,7 +108,7 @@
         d_out.write(buf, 0, 1024);
         LOG.info("Write worked beyond the soft limit as expected.");
       } catch (IOException e) {
-        Assert.fail("Write failed.");
+        Assertions.fail("Write failed.");
       }
 
       long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
@@ -121,14 +121,14 @@
       try {
         d_out.write(buf, 0, 1024);
         d_out.close();
-        Assert.fail("Write did not fail even after the fatal lease renewal failure");
+        Assertions.fail("Write did not fail even after the fatal lease renewal failure");
       } catch (IOException e) {
         LOG.info("Write failed as expected. ", e);
       }
 
       // If aborted, the renewer should be empty. (no reference to clients)
       Thread.sleep(1000);
-      Assert.assertTrue(originalRenewer.isEmpty());
+      Assertions.assertTrue(originalRenewer.isEmpty());
 
       // unstub
       doNothing().when(spyNN).renewLease(anyString());
@@ -137,12 +137,12 @@
       try {
         int num = c_in.read(buf, 0, 1);
         if (num != 1) {
-          Assert.fail("Failed to read 1 byte");
+          Assertions.fail("Failed to read 1 byte");
         }
         c_in.close();
       } catch (IOException e) {
          LOG.error("Read failed with ", e);
-         Assert.fail("Read after lease renewal failure failed");
+         Assertions.fail("Read after lease renewal failure failed");
       }
 
       // new file writes should work.
@@ -152,7 +152,7 @@
         c_out.close();
       } catch (IOException e) {
         LOG.error("Write failed with ", e);
-        Assert.fail("Write failed");
+        Assertions.fail("Write failed");
       }
     } finally {
       cluster.shutdown();
@@ -172,8 +172,8 @@
       FSDataOutputStream out = fs.create(p);
       out.writeBytes("something");
       //out.hsync();
-      Assert.assertTrue(hasLease(cluster, p));
-      Assert.assertEquals(1, leaseCount(cluster));
+      Assertions.assertTrue(hasLease(cluster, p));
+      Assertions.assertEquals(1, leaseCount(cluster));
       
       // just to ensure first fs doesn't have any logic to twiddle leases
       DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());
@@ -183,24 +183,24 @@
       Path pRenamed = new Path(d, p.getName());
       fs2.mkdirs(d);
       fs2.rename(p, pRenamed);
-      Assert.assertFalse(p+" exists", fs2.exists(p));
-      Assert.assertTrue(pRenamed+" not found", fs2.exists(pRenamed));
-      Assert.assertFalse("has lease for "+p, hasLease(cluster, p));
-      Assert.assertTrue("no lease for "+pRenamed, hasLease(cluster, pRenamed));
-      Assert.assertEquals(1, leaseCount(cluster));
+      Assertions.assertFalse(fs2.exists(p), p + " exists");
+      Assertions.assertTrue(fs2.exists(pRenamed), pRenamed + " not found");
+      Assertions.assertFalse(hasLease(cluster, p), "has lease for " + p);
+      Assertions.assertTrue(hasLease(cluster, pRenamed), "no lease for " + pRenamed);
+      Assertions.assertEquals(1, leaseCount(cluster));
     
       // rename the parent dir to a new non-existent dir
       LOG.info("DMS: rename parent dir");
       Path pRenamedAgain = new Path(d2, pRenamed.getName());
       fs2.rename(d, d2);
-      // src gone
-      Assert.assertFalse(d+" exists", fs2.exists(d));
-      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
-      // dst checks
-      Assert.assertTrue(d2+" not found", fs2.exists(d2));
-      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      // src gone
+      Assertions.assertFalse(fs2.exists(d), d + " exists");
+      Assertions.assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
+      // dst checks
+      Assertions.assertTrue(fs2.exists(d2), d2 + " not found");
+      Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      Assertions.assertEquals(1, leaseCount(cluster));
 
       // rename the parent dir to existing dir
       // NOTE: rename w/o options moves paths into existing dir
@@ -209,41 +209,41 @@
       pRenamedAgain = new Path(new Path(d, d2.getName()), p.getName());      
       fs2.mkdirs(d);
       fs2.rename(d2, d);
-      // src gone
-      Assert.assertFalse(d2+" exists", fs2.exists(d2));
-      Assert.assertFalse("no lease for "+pRenamed, hasLease(cluster, pRenamed));
-      // dst checks
-      Assert.assertTrue(d+" not found", fs2.exists(d));
-      Assert.assertTrue(pRenamedAgain +" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      // src gone
+      Assertions.assertFalse(fs2.exists(d2), d2 + " exists");
+      Assertions.assertFalse(hasLease(cluster, pRenamed), "no lease for " + pRenamed);
+      // dst checks
+      Assertions.assertTrue(fs2.exists(d), d + " not found");
+      Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      Assertions.assertEquals(1, leaseCount(cluster));
       
       // rename with opts to non-existent dir
       pRenamed = pRenamedAgain;
       pRenamedAgain = new Path(d2, p.getName());
       fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
-      // src gone
-      Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
-      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
-      // dst checks
-      Assert.assertTrue(d2+" not found", fs2.exists(d2));
-      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      // src gone
+      Assertions.assertFalse(fs2.exists(pRenamed.getParent()), pRenamed.getParent() + " not found");
+      Assertions.assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
+      // dst checks
+      Assertions.assertTrue(fs2.exists(d2), d2 + " not found");
+      Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      Assertions.assertEquals(1, leaseCount(cluster));
 
       // rename with opts to existing dir
       // NOTE: rename with options will not move paths into the existing dir
       pRenamed = pRenamedAgain;
       pRenamedAgain = new Path(d, p.getName());
       fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
-      // src gone
-      Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
-      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
-      // dst checks
-      Assert.assertTrue(d+" not found", fs2.exists(d));
-      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      // src gone
+      Assertions.assertFalse(fs2.exists(pRenamed.getParent()), pRenamed.getParent() + " not found");
+      Assertions.assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
+      // dst checks
+      Assertions.assertTrue(fs2.exists(d), d + " not found");
+      Assertions.assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      Assertions.assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      Assertions.assertEquals(1, leaseCount(cluster));
       out.close();
     } finally {
       cluster.shutdown();
@@ -269,8 +269,8 @@
       FileSystem fs = cluster.getFileSystem();
       FSDataOutputStream out1 = fs.create(path1);
       out1.writeBytes(contents1);
-      Assert.assertTrue(hasLease(cluster, path1));
-      Assert.assertEquals(1, leaseCount(cluster));
+      Assertions.assertTrue(hasLease(cluster, path1));
+      Assertions.assertEquals(1, leaseCount(cluster));
 
       DistributedFileSystem fs2 = (DistributedFileSystem)
           FileSystem.newInstance(fs.getUri(), fs.getConf());
@@ -281,14 +281,14 @@
       out2.close();
 
       // The first file should still be open and valid
-      Assert.assertTrue(hasLease(cluster, path2));
+      Assertions.assertTrue(hasLease(cluster, path2));
       out1.close();
 
       // Contents should be as expected
       DistributedFileSystem fs3 = (DistributedFileSystem)
           FileSystem.newInstance(fs.getUri(), fs.getConf());
-      Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
-      Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
+      Assertions.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
+      Assertions.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
     } finally {
       cluster.shutdown();
     }
@@ -299,7 +299,7 @@
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     try {
       FileSystem fs = cluster.getFileSystem();
-      Assert.assertTrue(fs.mkdirs(dir));
+      Assertions.assertTrue(fs.mkdirs(dir));
       
       Path a = new Path(dir, "a");
       Path b = new Path(dir, "b");
@@ -307,30 +307,30 @@
       DataOutputStream a_out = fs.create(a);
       a_out.writeBytes("something");
 
-      Assert.assertTrue(hasLease(cluster, a));
-      Assert.assertTrue(!hasLease(cluster, b));
+      Assertions.assertTrue(hasLease(cluster, a));
+      Assertions.assertTrue(!hasLease(cluster, b));
       
       DataOutputStream b_out = fs.create(b);
       b_out.writeBytes("something");
 
-      Assert.assertTrue(hasLease(cluster, a));
-      Assert.assertTrue(hasLease(cluster, b));
+      Assertions.assertTrue(hasLease(cluster, a));
+      Assertions.assertTrue(hasLease(cluster, b));
 
       a_out.close();
       b_out.close();
 
-      Assert.assertTrue(!hasLease(cluster, a));
-      Assert.assertTrue(!hasLease(cluster, b));
+      Assertions.assertTrue(!hasLease(cluster, a));
+      Assertions.assertTrue(!hasLease(cluster, b));
 
       Path fileA = new Path(dir, "fileA");
       FSDataOutputStream fileA_out = fs.create(fileA);
       fileA_out.writeBytes("something");
-      Assert.assertTrue("Failed to get the lease!", hasLease(cluster, fileA));
+      Assertions.assertTrue(hasLease(cluster, fileA), "Failed to get the lease!");
 
       fs.delete(dir, true);
       try {
         fileA_out.hflush();
-        Assert.fail("Should validate file existence!");
+        Assertions.fail("Should validate file existence!");
       } catch (FileNotFoundException e) {
         // expected
         GenericTestUtils.assertExceptionContains("File does not exist", e);
@@ -380,17 +380,17 @@
     FSDataOutputStream out1 = createFsOut(c1, "/out1");
     final DFSClient c2 = createDFSClientAs(ugi[0], conf);
     FSDataOutputStream out2 = createFsOut(c2, "/out2");
-    Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
+    Assertions.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
     final DFSClient c3 = createDFSClientAs(ugi[1], conf);
     FSDataOutputStream out3 = createFsOut(c3, "/out3");
-    Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
+    Assertions.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
     final DFSClient c4 = createDFSClientAs(ugi[1], conf);
     FSDataOutputStream out4 = createFsOut(c4, "/out4");
-    Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
+    Assertions.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
     final DFSClient c5 = createDFSClientAs(ugi[2], conf);
     FSDataOutputStream out5 = createFsOut(c5, "/out5");
-    Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
-    Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
+    Assertions.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
+    Assertions.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
   }
   
   private FSDataOutputStream createFsOut(DFSClient dfs, String path) 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index e2c956e..b7d61fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -16,11 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -57,8 +53,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 public class TestLeaseRecovery {
   static final int BLOCK_SIZE = 1024;
@@ -67,7 +63,7 @@
 
   private MiniDFSCluster cluster;
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -172,7 +168,7 @@
     waitLeaseRecovery(cluster);
     // verify that we still cannot recover the lease
     LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
-    assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
+    assertTrue(lm.countLease() == 1, "Found " + lm.countLease() + " lease, expected 1");
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
   }
@@ -232,7 +228,7 @@
     while (++count < 10 && !newdfs.recoverLease(file)) {
       Thread.sleep(1000);
     }
-    assertTrue("File should be closed", newdfs.recoverLease(file));
+    assertTrue(newdfs.recoverLease(file), "File should be closed");
 
     // Verify file length after lease recovery. The new file length should not
     // include the bytes with corrupted checksum.
@@ -281,8 +277,8 @@
     while (count++ < 15 && !newDfs.recoverLease(file)) {
       Thread.sleep(1000);
     }
-    // The lease should have been recovered.
-    assertTrue("File should be closed", newDfs.recoverLease(file));
+    // The lease should have been recovered.
+    assertTrue(newDfs.recoverLease(file), "File should be closed");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index bfa3dea..8c07314 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.spy;
 
@@ -53,10 +50,10 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.slf4j.event.Level;
 
@@ -92,7 +89,7 @@
    * 
    * @throws IOException
    */
-  @Before
+  @BeforeEach
   public void startUp() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -109,7 +106,7 @@
    * stop the cluster
    * @throws IOException
    */
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       IOUtils.closeStream(dfs);
@@ -320,11 +317,11 @@
     AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
         + "Validating its contents now...");
 
-    // verify that file-size matches
-    assertTrue("File should be " + size + " bytes, but is actually " +
-               " found to be " + dfs.getFileStatus(filepath).getLen() +
-               " bytes",
-               dfs.getFileStatus(filepath).getLen() == size);
+    // verify that file-size matches
+    assertTrue(dfs.getFileStatus(filepath).getLen() == size,
+        "File should be " + size + " bytes, but was found to be " +
+        dfs.getFileStatus(filepath).getLen() +
+        " bytes");
 
     // verify that there is enough data to read.
     System.out.println("File size is good. Now validating sizes from datanodes...");
@@ -471,8 +468,8 @@
 
     // verify that file-size matches
     long fileSize = dfs.getFileStatus(filepath).getLen();
-    assertTrue("File should be " + size + " bytes, but is actually " +
-        " found to be " + fileSize + " bytes", fileSize == size);
+    assertTrue(fileSize == size,
+        "File should be " + size + " bytes, but was found to be " + fileSize + " bytes");
 
     // verify data
     AppendTestUtil.LOG.info("File size is good. " +
@@ -529,10 +526,10 @@
     
     String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(
         cluster.getNameNode(), fileStr);
-    
-    assertFalse("original lease holder should not be the NN",
-        originalLeaseHolder.startsWith(
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER));
+
+    assertFalse(originalLeaseHolder.startsWith(
+        HdfsServerConstants.NAMENODE_LEASE_HOLDER),
+        "original lease holder should not be the NN");
 
     // hflush file
     AppendTestUtil.LOG.info("hflush");
@@ -540,7 +537,7 @@
     
     // check visible length
     final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
-    Assert.assertEquals(size, in.getVisibleLength());
+    Assertions.assertEquals(size, in.getVisibleLength());
     in.close();
     
     if (doRename) {
@@ -630,10 +627,10 @@
     final String holder = NameNodeAdapter.getLeaseHolderForPath(
         cluster.getNameNode(), f); 
     if (size == 0) {
-      assertEquals("lease holder should null, file is closed", null, holder);
+      assertNull(holder, "lease holder should be null, file is closed");
     } else {
-      assertTrue("lease holder should now be the NN",
-          holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
+      assertTrue(holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER),
+          "lease holder should now be the NN");
     }
     
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 5aa1f63..31e2ac7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -35,10 +35,10 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -87,7 +87,7 @@
   final Path p = new Path(dir, "testfile");
   private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -103,7 +103,7 @@
     dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -183,7 +183,7 @@
         String msg = "failed testCase at i=" + i + ", blockLengths="
             + blockLengths + "\n"
             + StringUtils.stringifyException(e);
-        Assert.fail(msg);
+        Assertions.fail(msg);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
index 7bb624e..c3a3877 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
@@ -19,8 +19,8 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestListFiles;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * This class tests the FileStatus API.
@@ -29,7 +29,7 @@
 
   private static MiniDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetUp() throws Exception {
     setTestPaths(new Path("/tmp/TestListFilesInDFS"));
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -37,7 +37,7 @@
     fs.delete(TEST_DIR, true);
   }
   
-  @AfterClass
+  @AfterAll
   public static void testShutdown() throws Exception {
     if (cluster != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
index 2f73a39..68d1d5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -34,10 +32,10 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the FileStatus API.
@@ -56,7 +54,7 @@
   final private static Path FILE2 = new Path(DIR1, "file2");
   final private static Path FILE3 = new Path(DIR1, "file3");
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).build();
     fc = FileContext.getFileContext(cluster.getConfiguration(0));
@@ -75,7 +73,7 @@
     stm.close();
   }
   
-  @AfterClass
+  @AfterAll
   public static void testShutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -106,7 +104,7 @@
     assertEquals(1, stat.getBlockLocations().length);
   }
 
-  @After
+  @AfterEach
   public void cleanDir() throws IOException {
     fc.delete(TEST_DIR, true);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
index b353de1..def4b47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index 5d2365a..a7eba1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -21,10 +21,8 @@
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -56,8 +54,8 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -344,16 +342,16 @@
     DFSTestUtil.waitForDatanodeState(
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
     DFSClient client = getDfsClient(0);
-    assertEquals("maintenance node shouldn't be live", numDatanodes - 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node shouldn't be live");
     assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
 
     getCluster().restartDataNode(dnProp, true);
     getCluster().waitActive();
     waitNodeState(nodeOutofService, AdminStates.ENTERING_MAINTENANCE);
     assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
-    assertEquals("maintenance node should be live", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node should be live");
 
     cleanupFile(fileSys, file);
   }
@@ -479,7 +477,7 @@
     int fileBlockReplication = maintenanceMinRepl + 1;
     int numAddedDataNodes = 1;
     int numInitialDataNodes = (maintenanceMinRepl * 2 - numAddedDataNodes);
-    Assert.assertTrue(maintenanceMinRepl <= defaultReplication);
+    Assertions.assertTrue(maintenanceMinRepl <= defaultReplication);
     testFileBlockReplicationImpl(maintenanceMinRepl,
         numInitialDataNodes, numAddedDataNodes, fileBlockReplication);
 
@@ -557,8 +555,8 @@
         AdminStates.IN_MAINTENANCE);
 
     DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
 
     // test 1, verify the replica in IN_MAINTENANCE state isn't in LocatedBlock
     checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -784,14 +782,14 @@
         nodeOutofService);
 
     final DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
 
     getCluster().stopDataNode(nodeOutofService.getXferAddr());
     DFSTestUtil.waitForDatanodeState(
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
-    assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node shouldn't be alive");
 
     // Dead maintenance node's blocks should remain in block map.
     checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -840,15 +838,15 @@
         nodeOutofService);
 
     DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
 
     MiniDFSCluster.DataNodeProperties dnProp =
         getCluster().stopDataNode(nodeOutofService.getXferAddr());
     DFSTestUtil.waitForDatanodeState(
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
-    assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node shouldn't be alive");
 
     // Dead maintenance node's blocks should remain in block map.
     checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -1025,9 +1023,9 @@
   static String checkFile(FSNamesystem ns, FileSystem fileSys,
       Path name, int repl, DatanodeInfo expectedExcludedNode,
       DatanodeInfo expectedMaintenanceNode) throws IOException {
-    // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    // need a raw stream
+    assertTrue(fileSys instanceof DistributedFileSystem,
+        "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
     BlockManager bm = ns.getBlockManager();
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
@@ -1126,9 +1124,9 @@
 
   static private DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(
       FileSystem fileSys, Path name) throws IOException {
-    // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    // need a raw stream
+    assertTrue(fileSys instanceof DistributedFileSystem,
+        "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     if (dinfo.iterator().hasNext()) { // for the first block
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 74a8e44..6ae09f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -39,9 +39,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
 import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 
 /**
@@ -58,7 +57,7 @@
   private static final String CLUSTER_4 = "cluster4";
   private static final String CLUSTER_5 = "cluster5";
   protected File testDataPath;
-  @Before
+  @BeforeEach
   public void setUp() {
     testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
   }
@@ -232,8 +231,8 @@
           .numDataNodes(1)
           .checkDataNodeHostConfig(true)
           .build()) {
-      assertEquals("DataNode hostname config not respected", "MYHOST",
-          cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
+      assertEquals("MYHOST", cluster5.getDataNodes().get(0).getDatanodeId().getHostName(),
+          "DataNode hostname config not respected");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
index cc29a93..0582599 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
@@ -27,16 +27,16 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import javax.management.*;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * The test makes sure that NameNode detects presense blocks that do not have
@@ -105,7 +105,7 @@
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(
               "Hadoop:service=NameNode,name=NameNodeInfo");
-      Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+      Assertions.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
                       "NumberOfMissingBlocks"));
 
       // now do the reverse : remove the file expect the number of missing 
@@ -121,7 +121,7 @@
       assertEquals(2, dfs.getLowRedundancyBlocksCount());
       assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
 
-      Assert.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
+      Assertions.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
               "NumberOfMissingBlocks"));
 
       Path replOneFile = new Path("/testMissingBlocks/replOneFile");
@@ -138,7 +138,7 @@
       }
       in.close();
       assertEquals(1, dfs.getMissingReplOneBlocksCount());
-      Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+      Assertions.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
           "NumberOfMissingBlocksWithReplicationFactorOne"));
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
index 3978444..c5e2a82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -34,7 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.util.ThreadUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the decommissioning of nodes.
@@ -79,7 +79,7 @@
                                                    cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    assertEquals(numDatanodes, info.length, "Number of Datanodes ");
     FileSystem fileSys = cluster.getFileSystem();
     int replicas = numDatanodes - 1;
     assertTrue(fileSys instanceof DistributedFileSystem);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index a839d85..48666f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -35,7 +35,7 @@
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests hflushing concurrently from many threads.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
index d536c5e..8bc6aa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
@@ -31,8 +31,8 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.security.token.Token;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY;
@@ -40,7 +40,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SEND_QOP_ENABLED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_NEW_QOP_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 /**
@@ -57,7 +57,7 @@
 
   private static HdfsConfiguration clusterConf;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     clusterConf = createSecureConfig(
         "authentication,integrity,privacy");
@@ -254,11 +254,11 @@
           .map(dn -> dn.getSaslClient().getTargetQOP())
           .filter("auth"::equals)
           .count();
-      // For each datanode pipeline, targetQOPs of sasl clients in the first two
-      // datanodes become equal to auth.
-      // Note that it is not necessarily the case for all datanodes,
-      // since a datanode may be always at the last position in pipelines.
-      assertTrue("At least two qops should be auth", count >= 2);
+      // For each datanode pipeline, targetQOPs of sasl clients in the first two
+      // datanodes become equal to auth.
+      // Note that it is not necessarily the case for all datanodes,
+      // since a datanode may be always at the last position in pipelines.
+      assertTrue(count >= 2, "At least two qops should be auth");
 
       clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
       FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
@@ -267,7 +267,7 @@
           .map(dn -> dn.getSaslClient().getTargetQOP())
           .filter("auth"::equals)
           .count();
-      assertTrue("At least two qops should be auth", count >= 2);
+      assertTrue(count >= 2, "At least two qops should be auth");
 
       clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
       FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
@@ -276,7 +276,7 @@
           .map(dn -> dn.getSaslServer().getNegotiatedQOP())
           .filter("auth"::equals)
           .count();
-      assertEquals("All qops should be auth", 3, count);
+      assertEquals(3, count, "All qops should be auth");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
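
The conversions above all follow one mechanical rule: JUnit 4's Assert methods take the failure message as the first argument, while JUnit 5's Assertions take it as the last (optionally as a Supplier<String>). A minimal sketch of the pattern, where countAuthConnections() is a hypothetical stand-in for the SASL QOP counting done in the test:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

class AssertionMessageOrderSketch {
  // Hypothetical helper standing in for the QOP counting in the real test.
  private long countAuthConnections() {
    return 3;
  }

  @Test
  void messageMovesToTheLastArgument() {
    long count = countAuthConnections();
    // JUnit 4: assertTrue("At least two qops should be auth", count >= 2);
    assertTrue(count >= 2, "At least two qops should be auth");
    // JUnit 4: assertEquals("All qops should be auth", 3, count);
    assertEquals(3, count, "All qops should be auth");
    // The message may also be computed lazily, only when the assertion fails.
    assertEquals(3, count, () -> "All qops should be auth, got " + count);
  }
}
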
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
index 26aa28e..2c21908 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestParallelRead extends TestParallelReadUtil {
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
     // This is a test of the normal (TCP) read path.  For this reason, we turn
     // off both short-circuit local reads and UNIX domain socket data traffic.
@@ -37,7 +37,7 @@
     setupCluster(DEFAULT_REPLICATION_FACTOR, conf);
   }
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
     TestParallelReadUtil.teardownCluster();
   }
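
The class-level fixture conversions in this and the following suites are a one-to-one annotation mapping; a minimal sketch under the assumption of a plain Jupiter test class (class and method names here are illustrative only):

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleMappingSketch {
  @BeforeAll          // was @BeforeClass; must stay static
  static void setupCluster() { }

  @BeforeEach         // was @Before
  void before() { }

  @Test
  void readsThroughTheCluster() { }

  @AfterEach          // was @After
  void after() { }

  @AfterAll           // was @AfterClass; must stay static
  static void teardownCluster() { }
}
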
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
index 857ab7b..1dc1175 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -32,8 +30,8 @@
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.util.Time;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -43,7 +41,7 @@
  * This class is marked as @Ignore so that junit doesn't try to execute the
  * tests in here directly.  They are executed from subclasses.
  */
-@Ignore
+@Disabled
 public class TestParallelReadUtil {
 
   static final Logger LOG = LoggerFactory.getLogger(TestParallelReadUtil.class);
@@ -260,9 +258,9 @@
      * Seek to somewhere random and read.
      */
     private void read(int start, int len) throws Exception {
-      assertTrue(
-          "Bad args: " + start + " + " + len + " should be <= " + fileSize,
-          start + len <= fileSize);
+      assertTrue(
+          start + len <= fileSize,
+          "Bad args: " + start + " + " + len + " should be <= " + fileSize);
       readCount++;
       DFSInputStream dis = testInfo.dis;
 
@@ -276,9 +274,9 @@
      * Positional read.
      */
     private void pRead(int start, int len) throws Exception {
-      assertTrue(
-          "Bad args: " + start + " + " + len + " should be <= " + fileSize,
-          start + len <= fileSize);
+      assertTrue(
+          start + len <= fileSize,
+          "Bad args: " + start + " + " + len + " should be <= " + fileSize);
       DFSInputStream dis = testInfo.dis;
 
       byte buf[] = new byte[len];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
index 220e45b..5b8a712 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
@@ -20,11 +20,11 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
     DFSInputStream.tcpReadsDisabledForTesting = true;
     HdfsConfiguration conf = new HdfsConfiguration();
@@ -40,7 +40,7 @@
     setupCluster(1, conf);
   }
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
     TestParallelReadUtil.teardownCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
index 3f352b4..2e8d649 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
@@ -17,22 +17,20 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-
 import java.io.File;
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestParallelShortCircuitRead extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -47,12 +45,12 @@
     setupCluster(1, conf);
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
index df110b4..3e30fc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
@@ -17,22 +17,20 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-
 import java.io.File;
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestParallelShortCircuitReadNoChecksum extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -47,12 +45,12 @@
     setupCluster(1, conf);
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
index ad26e18..b4d080e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-
 import java.io.File;
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This class tests short-circuit local reads without any FileInputStream or
@@ -36,7 +34,7 @@
 public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir = new TemporarySocketDirectory();
@@ -66,12 +64,12 @@
     setupCluster(1, conf);
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
index 872ac38..b211e88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
@@ -17,22 +17,20 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-
 import java.io.File;
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestParallelUnixDomainRead extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -46,12 +44,12 @@
     setupCluster(1, conf);
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
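
The four short-circuit and domain-socket suites above guard every test with an assumption rather than an assertion: when native DomainSocket support cannot be loaded the tests are meant to be skipped, not failed. A minimal sketch of the Jupiter equivalent of the old Assume.assumeThat(..., equalTo(null)) guard, as used in the converted before() methods:

import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.apache.hadoop.net.unix.DomainSocket;
import org.junit.jupiter.api.BeforeEach;

class DomainSocketAssumptionSketch {
  @BeforeEach
  void before() {
    // Skips (does not fail) the tests when native domain socket support is
    // unavailable, matching the original JUnit 4 Assume behaviour.
    assumeTrue(DomainSocket.getLoadingFailureReason() == null,
        "domain sockets unavailable: "
            + DomainSocket.getLoadingFailureReason());
  }
}
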
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index 3658d75..b0681f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -44,7 +42,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -128,8 +126,8 @@
       // Check that the file has no less bytes than before the restart
       // This would mean that blocks were successfully persisted to the log
       FileStatus status = fs.getFileStatus(FILE_PATH);
-      assertTrue("Length too short: " + status.getLen(),
-          status.getLen() >= len);
+      assertTrue(status.getLen() >= len,
+          "Length too short: " + status.getLen());
       
       // And keep writing (ensures that leases are also persisted correctly)
       stream.write(DATA_AFTER_RESTART);
@@ -194,8 +192,8 @@
       // Check that the file has no less bytes than before the restart
       // This would mean that blocks were successfully persisted to the log
       FileStatus status = fs.getFileStatus(FILE_PATH);
-      assertTrue("Length incorrect: " + status.getLen(),
-          status.getLen() == len - BLOCK_SIZE);
+      assertTrue(status.getLen() == len - BLOCK_SIZE,
+          "Length incorrect: " + status.getLen());
 
       // Verify the data showed up from before restart, sans abandoned block.
       FSDataInputStream readStream = fs.open(FILE_PATH);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index 4cead9c..ac32b8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.List;
@@ -35,9 +35,9 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 public class TestPipelines {
@@ -57,13 +57,13 @@
     setConfiguration();
   }
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (fs != null) {
       fs.close();
@@ -108,10 +108,10 @@
       Replica r =
           cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
 
-      assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
-      assertEquals("Should be RBW replica on " + dn
-          + " after sequence of calls append()/write()/hflush()",
-          HdfsServerConstants.ReplicaState.RBW, r.getState());
+      assertTrue(r != null, "Replica on DN " + dn + " shouldn't be null");
+      assertEquals(HdfsServerConstants.ReplicaState.RBW, r.getState(),
+          "Should be RBW replica on " + dn
+          + " after sequence of calls append()/write()/hflush()");
     }
     ofs.close();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index c1e0dbb..d6dec94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -50,9 +50,9 @@
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -77,7 +77,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestPread.class.getName());
 
-  @Before
+  @BeforeEach
   public void setup() {
     simulatedStorage = false;
     isHedgedRead = false;
@@ -98,10 +98,10 @@
       // should throw an exception
       res = e;
     }
-    assertTrue("Error reading beyond file boundary.", res != null);
+    assertTrue(res != null, "Error reading beyond file boundary.");
     in.close();
     if (!fileSys.delete(name, true))
-      assertTrue("Cannot delete file", false);
+      assertTrue(false, "Cannot delete file");
     
     // now create the real file
     DFSTestUtil.createFile(fileSys, name, fileSize, fileSize,
@@ -110,9 +110,9 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx) + " differs. expected "
+          + expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
@@ -131,17 +131,17 @@
     while (nread < length) {
       int nbytes =
           stm.read(position + nread, buffer, offset + nread, length - nread);
-      assertTrue("Error in pread", nbytes > 0);
+      assertTrue(nbytes > 0, "Error in pread");
       nread += nbytes;
     }
 
     if (dfstm != null) {
       if (isHedgedRead) {
-        assertTrue("Expected read statistic to be incremented", length <= dfstm
-            .getReadStatistics().getTotalBytesRead() - totalRead);
+        assertTrue(length <= dfstm.getReadStatistics().getTotalBytesRead()
+            - totalRead, "Expected read statistic to be incremented");
       } else {
-        assertEquals("Expected read statistic to be incremented", length, dfstm
-            .getReadStatistics().getTotalBytesRead() - totalRead);
+        assertEquals(length, dfstm.getReadStatistics().getTotalBytesRead()
+            - totalRead, "Expected read statistic to be incremented");
       }
     }
   }
@@ -212,7 +212,7 @@
       // should throw an exception
       res = e;
     }
-    assertTrue("Error reading beyond file boundary.", res != null);
+    assertTrue(res != null, "Error reading beyond file boundary.");
     
     stm.close();
   }
@@ -544,9 +544,9 @@
       });
       try {
         future.get(4, TimeUnit.SECONDS);
-        Assert.fail();
+        Assertions.fail();
       } catch (ExecutionException ee) {
-        assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
+        assertTrue(ee.getCause() instanceof EOFException, ee.toString());
       } finally {
         future.cancel(true);
         executor.shutdown();
@@ -601,7 +601,7 @@
       byte[] buffer = new byte[64 * 1024];
       input = dfsClient.open(filename);
       input.read(0, buffer, 0, 1024);
-      Assert.fail("Reading the block should have thrown BlockMissingException");
+      Assertions.fail("Reading the block should have thrown BlockMissingException");
     } catch (BlockMissingException e) {
       assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
       assertTrue(metrics.getHedgedReadOps() == 0);
@@ -760,10 +760,10 @@
       byte[] buf = new byte[1024];
       int n = din.read(0, buf, 0, data.length());
       assertEquals(data.length(), n);
-      assertEquals("Data should be read", data, new String(buf, 0, n));
-      assertTrue("Read should complete with maximum " + maxFailures
-              + " failures, but completed with " + din.failures,
-          din.failures <= maxFailures);
+      assertEquals(data, new String(buf, 0, n), "Data should be read");
+      assertTrue(din.failures <= maxFailures,
+          "Read should complete with maximum " + maxFailures
+          + " failures, but completed with " + din.failures);
       DFSClient.LOG.info("Read completed");
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 2e36b13..2c1fa4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -21,11 +21,8 @@
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
@@ -59,12 +56,11 @@
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
@@ -90,7 +86,7 @@
   @Rule
   public final Timeout testTestout = new Timeout(120000);
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpClass() throws Exception {
     conf = new HdfsConfiguration();
     conf.set(
@@ -132,7 +128,7 @@
     ERR_STREAM.reset();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownClass() {
     try {
       System.out.flush();
@@ -288,7 +284,7 @@
     try {
       fout.write(new byte[fileLen]);
       fout.close();
-      Assert.fail();
+      Assertions.fail();
     } catch (QuotaExceededException e) {
       IOUtils.closeStream(fout);
     }
@@ -396,8 +392,8 @@
     ugi.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
-        assertEquals("Not running as new user", username,
-            UserGroupInformation.getCurrentUser().getShortUserName());
+        assertEquals(username, UserGroupInformation.getCurrentUser()
+            .getShortUserName(), "Not running as new user");
         DFSAdmin userAdmin = new DFSAdmin(conf);
 
         args2[1] = "100";
@@ -974,8 +970,8 @@
     assertTrue(dfs.mkdirs(parent));
 
     final FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-                fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not a HDFS: " + fs.getUri());
     final DistributedFileSystem dfs = (DistributedFileSystem)fs;
 
     // create test directory
@@ -986,25 +982,25 @@
     dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
     ContentSummary c = dfs.getContentSummary(testFolder);
     compareQuotaUsage(c, dfs, testFolder);
-    assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
+    assertTrue(c.getQuota() == Long.MAX_VALUE - 1, "Quota not set properly");
 
     // setting diskspace quota to Long.MAX_VALUE - 1 should work
     dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
     c = dfs.getContentSummary(testFolder);
     compareQuotaUsage(c, dfs, testFolder);
-    assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
+    assertTrue(c.getSpaceQuota() == Long.MAX_VALUE - 1, "Quota not set properly");
 
     // setting namespace quota to Long.MAX_VALUE should not work + no error
     dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
     c = dfs.getContentSummary(testFolder);
     compareQuotaUsage(c, dfs, testFolder);
-    assertTrue("Quota should not have changed", c.getQuota() == 10);
+    assertTrue(c.getQuota() == 10, "Quota should not have changed");
 
     // setting diskspace quota to Long.MAX_VALUE should not work + no error
     dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
     c = dfs.getContentSummary(testFolder);
     compareQuotaUsage(c, dfs, testFolder);
-    assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
+    assertTrue(c.getSpaceQuota() == 10, "Quota should not have changed");
 
     // setting namespace quota to Long.MAX_VALUE + 1 should not work + error
     try {
@@ -1057,8 +1053,8 @@
     c = dfs.getContentSummary(dir);
     compareQuotaUsage(c, dfs, dir);
     checkContentSummary(c, webhdfs.getContentSummary(dir));
-    assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
-                 c.getSpaceConsumed());
+    assertEquals(QUOTA_SIZE / 2, c.getSpaceConsumed(),
+        "Quota is half consumed");
 
     // We can not create the 2nd file because even though the total spaced
     // used by two files (2 * 3 * 512/2) would fit within the quota (3 * 512)
@@ -1071,7 +1067,7 @@
     } catch (QuotaExceededException e) {
       exceededQuota = true;
     }
-    assertTrue("Quota not exceeded", exceededQuota);
+    assertTrue(exceededQuota, "Quota not exceeded");
  }
 
  /**
@@ -1108,9 +1104,9 @@
       //Test for deafult NameSpace Quota
       long nsQuota = FSImageTestUtil.getNSQuota(dfsCluster.getNameNode()
           .getNamesystem());
-      assertTrue(
-          "Default namespace quota expected as long max. But the value is :"
-              + nsQuota, nsQuota == Long.MAX_VALUE);
+      assertTrue(nsQuota == Long.MAX_VALUE,
+          "Default namespace quota expected as long max. But the value is :"
+              + nsQuota);
       
       Path dir = new Path(parent, "test");
       boolean exceededQuota = false;
@@ -1145,10 +1141,10 @@
       c = fs.getContentSummary(dir);
       compareQuotaUsage(c, fs, dir);
       checkContentSummary(c, webHDFS.getContentSummary(dir));
-      assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
-          c.getSpaceConsumed());
-      assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),
-          3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
+      assertEquals(59 * FILE_SIZE * 3, c.getSpaceConsumed(),
+          "Invalid space consumed");
+      assertEquals(QUOTA_SIZE - (59 * FILE_SIZE * 3),
+          3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE), "Invalid space consumed");
 
       // Now check that trying to create another file violates the quota
       try {
@@ -1158,7 +1154,7 @@
       } catch (QuotaExceededException e) {
         exceededQuota = true;
       }
-      assertTrue("Quota not exceeded", exceededQuota);
+      assertTrue(exceededQuota, "Quota not exceeded");
       assertEquals(2, dfsCluster.getNamesystem().getFSDirectory().getYieldCount());
     } finally {
       dfsCluster.shutdown();
@@ -1239,11 +1235,11 @@
         new String[] {"-setSpaceQuota", "-10", dir.toString()});
     assertEquals(-1, ret);
     scanIntoList(ERR_STREAM, outs);
-    assertEquals(
-        "It should be two lines of error messages,"
-        + " the 1st one is about Illegal option,"
-        + " the 2nd one is about SetSpaceQuota usage.",
-        2, outs.size());
+    assertEquals(
+        2, outs.size(),
+        "It should be two lines of error messages,"
+        + " the 1st one is about Illegal option,"
+        + " the 2nd one is about SetSpaceQuota usage.");
     assertThat(outs.get(0),
         is(allOf(containsString("setSpaceQuota"),
             containsString("Illegal option"))));
@@ -1316,9 +1312,9 @@
     final QuotaUsage quotaUsage = dfs.getQuotaUsage(dir);
     assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
     scanIntoList(OUT_STREAM, outs);
-    assertTrue(
-        "There should be no output if it runs successfully.",
-        outs.isEmpty());
+    assertTrue(
+        outs.isEmpty(),
+        "There should be no output if it runs successfully.");
   }
 
   /**
@@ -1372,9 +1368,9 @@
         spaceQuotaByStorageType,
         quotaUsage.getTypeQuota(StorageType.DISK));
     scanIntoList(OUT_STREAM, outs);
-    assertTrue(
-        "There should be no output if it runs successfully.",
-        outs.isEmpty());
+    assertTrue(
+        outs.isEmpty(),
+        "There should be no output if it runs successfully.");
   }
 
   /**
@@ -1414,10 +1410,10 @@
     final int ret = ToolRunner.run(dfsAdmin, args);
     assertEquals(cmdRet, ret);
     scanIntoList(ERR_STREAM, outs);
-    assertEquals(
-        "It should be one line error message like: clrSpaceQuota:"
-            + " Directory does not exist: <full path of XXX directory>",
-        1, outs.size());
+    assertEquals(
+        1, outs.size(),
+        "It should be one line error message like: clrSpaceQuota:"
+            + " Directory does not exist: <full path of XXX directory>");
     assertThat(outs.get(0),
         is(allOf(containsString(cmdName),
             containsString("does not exist"),
@@ -1465,10 +1461,10 @@
     final int ret = ToolRunner.run(dfsAdmin, args);
     assertEquals(cmdRet, ret);
     scanIntoList(ERR_STREAM, outs);
-    assertEquals(
-        "It should be one line error message like: clrSpaceQuota:"
-            + " <full path of XXX file> is not a directory",
-        1, outs.size());
+    assertEquals(
+        1, outs.size(),
+        "It should be one line error message like: clrSpaceQuota:"
+            + " <full path of XXX file> is not a directory");
     assertThat(outs.get(0),
         is(allOf(containsString(cmdName),
             containsString(file.toString()),
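
Note on the Hamcrest imports kept in TestQuota: org.junit.jupiter.api.Assertions provides no assertThat, so the matcher-based checks are now routed through Hamcrest's own MatcherAssert, as the new static import above shows. A minimal illustration of the same pattern, with a hypothetical checkErrorLine() wrapper:

import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;

class HamcrestWithJupiterSketch {
  // Hypothetical helper; the matcher expression mirrors the TestQuota checks.
  void checkErrorLine(String line) {
    // Asserted through org.hamcrest.MatcherAssert, not the removed
    // org.junit.Assert.assertThat.
    assertThat(line,
        is(allOf(containsString("setSpaceQuota"),
            containsString("Illegal option"))));
  }
}
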
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuotaAllowOwner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuotaAllowOwner.java
index 55221b4..ff2b422 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuotaAllowOwner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuotaAllowOwner.java
@@ -23,22 +23,22 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestQuotaAllowOwner {
   private static Configuration conf;
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem dfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpClass() throws Exception {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
@@ -47,7 +47,7 @@
     restartCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownClass() {
     if (cluster != null) {
       cluster.shutdown();
@@ -96,8 +96,8 @@
     UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         userName,  new String[]{groupName});
     ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
-      assertEquals("Not running as new user", userName,
-          UserGroupInformation.getCurrentUser().getShortUserName());
+      assertEquals(userName, UserGroupInformation.getCurrentUser()
+          .getShortUserName(), "Not running as new user");
       DFSAdmin userAdmin = new DFSAdmin(conf);
 
       String[] args2 = new String[]{"-setQuota", "5", subDir};
@@ -106,18 +106,18 @@
       TestQuota.runCommand(userAdmin, args2, false);
 
       ContentSummary c = dfs.getContentSummary(new Path(subDir));
-      assertEquals("Not same with setting quota",
-          5, c.getQuota());
-      assertEquals("Not same with setting space quota",
-          64, c.getSpaceQuota());
+      assertEquals(5, c.getQuota(),
+          "Not same with setting quota");
+      assertEquals(64, c.getSpaceQuota(),
+          "Not same with setting space quota");
       args2 = new String[]{"-clrQuota", subDir};
       TestQuota.runCommand(userAdmin, args2, false);
       args2 = new String[]{"-clrSpaceQuota", subDir};
       TestQuota.runCommand(userAdmin, args2, false);
       c = dfs.getContentSummary(new Path(subDir));
-      assertEquals("Not clean quota", -1, c.getQuota());
-      assertEquals("Not clean space quota",
-          -1, c.getSpaceQuota());
+      assertEquals(-1, c.getQuota(), "Not clean quota");
+      assertEquals(-1, c.getSpaceQuota(),
+          "Not clean space quota");
       return null;
     });
   }
@@ -138,8 +138,8 @@
     UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
         userName,  new String[]{groupName});
     ugi2.doAs((PrivilegedExceptionAction<Object>) () -> {
-      assertEquals("Not running as new user", userName,
-          UserGroupInformation.getCurrentUser().getShortUserName());
+      assertEquals(userName, UserGroupInformation.getCurrentUser()
+          .getShortUserName(), "Not running as new user");
       DFSAdmin userAdmin = new DFSAdmin(conf);
 
       String[] args2 = new String[]{"-setQuota", "5", subDir};
@@ -167,8 +167,8 @@
     UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         userOther, new String[]{groupOther});
     ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
-      assertEquals("Not running as new user", userOther,
-          UserGroupInformation.getCurrentUser().getShortUserName());
+      assertEquals(userOther, UserGroupInformation.getCurrentUser()
+          .getShortUserName(), "Not running as new user");
       DFSAdmin userAdmin = new DFSAdmin(conf);
 
       String[] args2 = new String[]{"-setQuota", "5", subDir.toString()};
@@ -196,8 +196,8 @@
     UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         userOther, new String[]{groupName});
     ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
-      assertEquals("Not running as new user", userOther,
-          UserGroupInformation.getCurrentUser().getShortUserName());
+      assertEquals(userOther, UserGroupInformation.getCurrentUser()
+          .getShortUserName(), "Not running as new user");
       DFSAdmin userAdmin = new DFSAdmin(conf);
 
       String[] args2 = new String[]{"-setQuota", "5", subDir.toString()};
@@ -228,8 +228,8 @@
       UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
           userName, new String[]{groupName});
       ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
-        assertEquals("Not running as new user", userName,
-            UserGroupInformation.getCurrentUser().getShortUserName());
+        assertEquals(userName, UserGroupInformation.getCurrentUser()
+            .getShortUserName(), "Not running as new user");
 
         DFSAdmin userAdmin = new DFSAdmin(conf);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
index 974fdf8..a708e47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -40,6 +39,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil.ShortCircuitTestContext;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 public class TestRead {
   final private int BLOCK_SIZE = 512;
@@ -51,14 +51,14 @@
     FSDataInputStream fis = fs.open(path);
     ByteBuffer empty = ByteBuffer.allocate(0);
     // A read into an empty bytebuffer at the beginning of the file gives 0.
-    Assert.assertEquals(0, fis.read(empty));
+    Assertions.assertEquals(0, fis.read(empty));
     fis.seek(fileLength);
     // A read into an empty bytebuffer at the end of the file gives -1.
-    Assert.assertEquals(-1, fis.read(empty));
+    Assertions.assertEquals(-1, fis.read(empty));
     if (fileLength > BLOCK_SIZE) {
       fis.seek(fileLength - BLOCK_SIZE + 1);
       ByteBuffer dbb = ByteBuffer.allocateDirect(BLOCK_SIZE);
-      Assert.assertEquals(BLOCK_SIZE - 1, fis.read(dbb));
+      Assertions.assertEquals(BLOCK_SIZE - 1, fis.read(dbb));
     }
     fis.close();
   }
@@ -106,7 +106,7 @@
     try {
       FileSystem fs = cluster.getFileSystem();
       fs.open(new Path("/.reserved/.inodes/file"));
-      Assert.fail("Open a non existing file should fail.");
+      Assertions.fail("Open a non existing file should fail.");
     } catch (FileNotFoundException e) {
       // Expected
     } finally {
@@ -149,7 +149,7 @@
       reader.interrupt();
       reader.join();
 
-      Assert.assertTrue(readInterrupted.get());
+      Assertions.assertTrue(readInterrupted.get());
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDNFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDNFailure.java
index 40ac206..1265527 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDNFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDNFailure.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -52,13 +52,13 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     cluster = initializeCluster();
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     tearDownCluster(cluster);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 2fb9212..85cae41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -29,11 +29,11 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,13 +60,13 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     cluster = initializeCluster();
     dfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     tearDownCluster(cluster);
   }
@@ -86,7 +86,7 @@
     // corrupt the first data block
     int dnIndex = ReadStripedFileWithDecodingHelper.findFirstDataNode(
         cluster, dfs, file, CELL_SIZE * NUM_DATA_UNITS);
-    Assert.assertNotEquals(-1, dnIndex);
+    Assertions.assertNotEquals(-1, dnIndex);
     LocatedStripedBlock slb = (LocatedStripedBlock) dfs.getClient()
         .getLocatedBlocks(file.toString(), 0, CELL_SIZE * NUM_DATA_UNITS)
         .get(0);
@@ -95,7 +95,7 @@
     // find the first block file
     File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
     File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[0].getBlock());
-    Assert.assertTrue("Block file does not exist", blkFile.exists());
+    Assertions.assertTrue(blkFile.exists(), "Block file does not exist");
     // corrupt the block file
     LOG.info("Deliberately corrupting file " + blkFile.getName());
     try (FileOutputStream out = new FileOutputStream(blkFile)) {
@@ -118,7 +118,7 @@
       final BlockManager bm = ns.getBlockManager();
       BlockInfo blockInfo = (ns.getFSDirectory().getINode4Write(file.toString())
           .asFile().getBlocks())[0];
-      Assert.assertEquals(1, bm.getCorruptReplicas(blockInfo).size());
+      Assertions.assertEquals(1, bm.getCorruptReplicas(blockInfo).size());
     } finally {
       for (DataNode dn : cluster.getDataNodes()) {
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
@@ -135,7 +135,7 @@
 
     int dnIndex = findFirstDataNode(cluster, dfs, file,
         CELL_SIZE * NUM_DATA_UNITS);
-    Assert.assertNotEquals(-1, dnIndex);
+    Assertions.assertNotEquals(-1, dnIndex);
     LocatedStripedBlock slb = (LocatedStripedBlock) dfs.getClient()
         .getLocatedBlocks(file.toString(), 0, CELL_SIZE * NUM_DATA_UNITS)
         .get(0);
@@ -156,7 +156,7 @@
       final BlockManager bm = fsn.getBlockManager();
       DatanodeDescriptor dnd =
           NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
-      Assert.assertTrue(bm.containsInvalidateBlock(
+      Assertions.assertTrue(bm.containsInvalidateBlock(
           blks[0].getLocations()[0], b) || dnd.containsInvalidateBlock(b));
     } finally {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingCorruptData.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingCorruptData.java
index 5a8fb4f..d2708b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingCorruptData.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingCorruptData.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -47,13 +47,13 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     cluster = initializeCluster();
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     tearDownCluster(cluster);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingDeletedData.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingDeletedData.java
index c267e84..3f4d351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingDeletedData.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingDeletedData.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -48,13 +48,13 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     cluster = initializeCluster();
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     tearDownCluster(cluster);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index 4c864b3..7632655 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -26,8 +26,8 @@
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -124,7 +124,7 @@
 
     // make sure there are missing block locations
     BlockLocation[] newLocs = fs.getFileBlockLocations(srcPath, 0, cellSize);
-    Assert.assertTrue(
+    Assertions.assertTrue(
         newLocs[0].getNames().length < locs[0].getNames().length);
 
     byte[] smallBuf = new byte[1024];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
index 8de1799..7593343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
@@ -32,8 +32,8 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /** Test reading from hdfs while a file is being written. */
@@ -149,11 +149,11 @@
     final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);
 
     //Check visible length
-    Assert.assertTrue(in.getVisibleLength() >= expectedsize);
+    Assertions.assertTrue(in.getVisibleLength() >= expectedsize);
 
     //Able to read?
     for(int i = 0; i < expectedsize; i++) {
-      Assert.assertEquals((byte)i, (byte)in.read());  
+      Assertions.assertEquals((byte)i, (byte)in.read());  
     }
 
     in.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index fa3c1aa..e5f3440 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -66,10 +66,10 @@
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 public class TestReconstructStripedFile {
@@ -124,7 +124,7 @@
     return cluster;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     ecPolicy = getEcPolicy();
     dataBlkNum = ecPolicy.getNumDataUnits();
@@ -165,7 +165,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -328,9 +328,9 @@
   void assertFileBlocksReconstruction(String fileName, int fileLen,
       ReconstructionType type, int toRecoverBlockNum) throws Exception {
     if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
-      Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
+      Assertions.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
     }
-    assertTrue("File length must be positive.", fileLen > 0);
+    assertTrue(fileLen > 0, "File length must be positive.");
 
     Path file = new Path(fileName);
 
@@ -423,7 +423,7 @@
       byte[] replicaContentAfterReconstruction =
           DFSTestUtil.readFileAsBytes(replicaAfterReconstruction);
 
-      Assert.assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
+      Assertions.assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
     }
   }
 
@@ -442,7 +442,7 @@
         }
       }
       if (result[i] == -1) {
-        Assert.fail("Failed to reconstruct striped block: "
+        Assertions.fail("Failed to reconstruct striped block: "
             + blocks[i].getBlockId());
       }
     }
@@ -511,8 +511,8 @@
     }
 
     // Inject data-loss by tear down desired number of DataNodes.
-    assumeTrue("Ignore case where num dead DNs > num parity units",
-        policy.getNumParityUnits() >= deadDN);
+    assumeTrue(policy.getNumParityUnits() >= deadDN,
+        "Ignore case where num dead DNs > num parity units");
     List<DataNode> dataNodes = new ArrayList<>(cluster.getDataNodes());
     Collections.shuffle(dataNodes);
     for (DataNode dn : dataNodes.subList(0, deadDN)) {
@@ -526,9 +526,9 @@
     // Make sure that all pending reconstruction tasks can be processed.
     while (ns.getPendingReconstructionBlocks() > 0) {
       long timeoutPending = ns.getNumTimedOutPendingReconstructions();
-      assertEquals(String
-          .format("Found %d timeout pending reconstruction tasks",
-              timeoutPending), 0, timeoutPending);
+      assertEquals(0, timeoutPending, String
+          .format("Found %d timeout pending reconstruction tasks",
+              timeoutPending));
       Thread.sleep(1000);
     }
 
@@ -612,8 +612,8 @@
    */
   @Test(timeout = 120000)
   public void testTimeoutReadBlockInReconstruction() throws Exception {
-    assumeTrue("Ignore case where num parity units <= 1",
-        ecPolicy.getNumParityUnits() > 1);
+    assumeTrue(ecPolicy.getNumParityUnits() > 1,
+        "Ignore case where num parity units <= 1");
     int stripedBufferSize = conf.getInt(
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
         cellSize);
@@ -630,7 +630,7 @@
 
     LocatedBlocks locatedBlocks =
         StripedFileTestUtil.getLocatedBlocks(file, fs);
-    Assert.assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+    Assertions.assertEquals(1, locatedBlocks.getLocatedBlocks().size());
     // The file only has one block group
     LocatedBlock lblock = locatedBlocks.get(0);
     DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -642,10 +642,10 @@
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY,
         DFSConfigKeys.
             DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT);
-    Assert.assertTrue(
-        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY
-            + " must be greater than 2000",
-        stripedReadTimeoutInMills > 2000);
+    Assertions.assertTrue(
+        stripedReadTimeoutInMills > 2000,
+        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY
+            + " must be greater than 2000");
 
     DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector timeoutInjector = new DataNodeFaultInjector() {
@@ -665,7 +665,7 @@
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assert.fail("Can't reconstruct the file's first part.");
+            Assertions.fail("Can't reconstruct the file's first part.");
           } catch (InterruptedException e) {
           }
         }
@@ -700,8 +700,8 @@
    */
   @Test(timeout = 120000)
   public void testAbnormallyCloseDoesNotWriteBufferAgain() throws Exception {
-    assumeTrue("Ignore case where num parity units <= 1",
-        ecPolicy.getNumParityUnits() > 1);
+    assumeTrue(ecPolicy.getNumParityUnits() > 1,
+        "Ignore case where num parity units <= 1");
     int stripedBufferSize = conf.getInt(
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
         cellSize);
@@ -714,7 +714,7 @@
 
     LocatedBlocks locatedBlocks =
         StripedFileTestUtil.getLocatedBlocks(file, fs);
-    Assert.assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+    Assertions.assertEquals(1, locatedBlocks.getLocatedBlocks().size());
     // The file only has one block group
     LocatedBlock lblock = locatedBlocks.get(0);
     DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -726,10 +726,10 @@
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY,
         DFSConfigKeys.
             DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT);
-    Assert.assertTrue(
-        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY
-            + " must be greater than 2000",
-        stripedReadTimeoutInMills > 2000);
+    Assertions.assertTrue(
+        stripedReadTimeoutInMills > 2000,
+        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY
+            + " must be greater than 2000");
 
     ElasticByteBufferPool bufferPool =
         (ElasticByteBufferPool) ErasureCodingTestHelper.getBufferPool();
@@ -758,7 +758,7 @@
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assert.fail("Can't reconstruct the file's first part.");
+            Assertions.fail("Can't reconstruct the file's first part.");
           } catch (InterruptedException e) {
           }
         }
@@ -773,7 +773,7 @@
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assert.fail("Can't reconstruct the file's remaining part.");
+            Assertions.fail("Can't reconstruct the file's remaining part.");
           } catch (InterruptedException e) {
           }
         }
@@ -799,7 +799,7 @@
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assert.fail("Can't finish the file's reconstruction.");
+            Assertions.fail("Can't finish the file's reconstruction.");
           } catch (InterruptedException e) {
           }
         }
@@ -827,7 +827,7 @@
     while (bufferPool.size(direct) != 0) {
       // iterate all ByteBuffers in ElasticByteBufferPool
       ByteBuffer byteBuffer =  bufferPool.getBuffer(direct, 0);
-      Assert.assertEquals(0, byteBuffer.position());
+      Assertions.assertEquals(0, byteBuffer.position());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java
index 00749ef..969c494 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFileWithValidator.java
@@ -20,8 +20,8 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,7 +59,7 @@
     cluster.getDataNodes().stream()
         .map(DataNode::getMetrics)
         .map(DataNodeMetrics::getECInvalidReconstructionTasks)
-        .forEach(n -> Assert.assertEquals(0, (long) n));
+        .forEach(n -> Assertions.assertEquals(0, (long) n));
 
     DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector badDecodingInjector = new DataNodeFaultInjector() {
@@ -93,7 +93,7 @@
           .map(DataNode::getMetrics)
           .mapToLong(DataNodeMetrics::getECInvalidReconstructionTasks)
           .sum();
-      Assert.assertEquals(1, sum);
+      Assertions.assertEquals(1, sum);
     } finally {
       DataNodeFaultInjector.set(oldInjector);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
index 9b29fe8..7201e0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.spy;
 
@@ -27,7 +27,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.slf4j.event.Level;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
index 432a297..a52eba8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
@@ -31,8 +31,8 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * Verify the behaviours of HdfsClientConfigKeys.BlockWrite.
@@ -127,7 +127,7 @@
       for (SlowWriter s : slowwriters) {
         try {
           s.out.getCurrentBlockReplication();
-          Assert.fail(
+          Assertions.fail(
               "Must throw exception as failed to add a new datanode for write "
                   + "pipeline, minimum failure replication");
         } catch (IOException e) {
@@ -198,7 +198,7 @@
       cluster.waitFirstBRCompleted(0, 10000);
       // check replication and interrupt.
       for (SlowWriter s : slowwriters) {
-        Assert.assertEquals(failRF, s.out.getCurrentBlockReplication());
+        Assertions.assertEquals(failRF, s.out.getCurrentBlockReplication());
         s.interruptRunning();
       }
 
@@ -228,7 +228,7 @@
         for (int j = 0, x;; j++) {
           x = in.read();
           if ((x) != -1) {
-            Assert.assertEquals(j, x);
+            Assertions.assertEquals(j, x);
           } else {
             return;
           }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index 5015722..e2df609 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -37,8 +37,8 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -79,7 +79,7 @@
     for(short replication = 1; replication <= infos.length; replication++) {
       for(int nExistings = 0; nExistings < datanodes.length; nExistings++) {
         final DatanodeInfo[] existings = datanodes[nExistings];
-        Assert.assertEquals(nExistings, existings.length);
+        Assertions.assertEquals(nExistings, existings.length);
 
         for(int i = 0; i < isAppend.length; i++) {
           for(int j = 0; j < isHflushed.length; j++) {
@@ -100,7 +100,7 @@
             final boolean computed = p.satisfy(
                 replication, existings, isAppend[i], isHflushed[j]);
             try {
-              Assert.assertEquals(expected, computed);
+              Assertions.assertEquals(expected, computed);
             } catch(AssertionError e) {
               final String s = "replication=" + replication
                            + "\nnExistings =" + nExistings
@@ -185,7 +185,7 @@
         try {
           in = fs.open(slowwriters[i].filepath);
           for(int j = 0, x; (x = in.read()) != -1; j++) {
-            Assert.assertEquals(j, x);
+            Assertions.assertEquals(j, x);
           }
         }
         finally {
@@ -270,7 +270,7 @@
     }
 
     void checkReplication() throws IOException {
-      Assert.assertEquals(REPLICATION, out.getCurrentBlockReplication());
+      Assertions.assertEquals(REPLICATION, out.getCurrentBlockReplication());
     }        
   }
 
@@ -290,8 +290,8 @@
         LOG.info("create an empty file " + f);
         fs.create(f, REPLICATION).close();
         final FileStatus status = fs.getFileStatus(f);
-        Assert.assertEquals(REPLICATION, status.getReplication());
-        Assert.assertEquals(0L, status.getLen());
+        Assertions.assertEquals(REPLICATION, status.getReplication());
+        Assertions.assertEquals(0L, status.getLen());
       }
       
       
@@ -303,8 +303,8 @@
         out.close();
 
         final FileStatus status = fs.getFileStatus(f);
-        Assert.assertEquals(REPLICATION, status.getReplication());
-        Assert.assertEquals(bytes.length, status.getLen());
+        Assertions.assertEquals(REPLICATION, status.getReplication());
+        Assertions.assertEquals(bytes.length, status.getLen());
       }
 
       {
@@ -314,7 +314,7 @@
           out.write(bytes);
           out.close();
 
-          Assert.fail();
+          Assertions.fail();
         } catch(IOException ioe) {
           LOG.info("This exception is expected", ioe);
         }
@@ -346,8 +346,8 @@
         out.close();
 
         final FileStatus status = fs.getFileStatus(f);
-        Assert.assertEquals(REPLICATION, status.getReplication());
-        Assert.assertEquals(bytes.length, status.getLen());
+        Assertions.assertEquals(REPLICATION, status.getReplication());
+        Assertions.assertEquals(bytes.length, status.getLen());
       }
 
       {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 4948c6f..bf9cddb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -19,9 +19,7 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 
 import java.util.function.Supplier;
@@ -243,9 +241,9 @@
     int blockFilesCorrupted =
         corruptBlockByDeletingBlockFile?
             cluster.corruptBlockOnDataNodesByDeletingBlockFile(block) :
-              cluster.corruptBlockOnDataNodes(block);       
+              cluster.corruptBlockOnDataNodes(block);
 
-    assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted); 
+    assertEquals(replFactor, blockFilesCorrupted, "Corrupted too few blocks");
 
     // Increase replication factor, this should invoke transfer request
     // Receiving datanode fails on checksum and reports it to namenode
@@ -310,7 +308,7 @@
       replicaCount = dfsClient.getNamenode()
           .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE).get(0)
           .getLocations().length;
-      assertEquals("replication should not success", 1, replicaCount);
+      assertEquals(1, replicaCount, "replication should not succeed");
     } finally {
       cluster.shutdown();
     }
@@ -351,7 +349,7 @@
     DFSClient client = new DFSClient(addr, conf);
     
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    assertEquals(numDatanodes, info.length, "Number of Datanodes ");
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("/smallblocktest.dat");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
index 0eb0605..c112069 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
@@ -42,9 +42,9 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
@@ -52,10 +52,7 @@
 import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesNotEqual;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.GenericTestUtils.assertMatches;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestReservedRawPaths {
 
@@ -72,7 +69,7 @@
   protected static final EnumSet< CreateEncryptionZoneFlag > NO_TRASH =
       EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     fsHelper = new FileSystemTestHelper();
@@ -98,7 +95,7 @@
     DFSTestUtil.createKey(TEST_KEY, cluster, conf);
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -160,18 +157,18 @@
     final FileStatus p1Stat = fs.getFileStatus(p1);
     final FileStatus p2Stat = fs.getFileStatus(p2);
 
-    /*
-     * Use accessTime and modificationTime as substitutes for INode to check
-     * for resolution to the same underlying file.
-     */
-    assertEquals("Access times not equal", p1Stat.getAccessTime(),
-        p2Stat.getAccessTime());
-    assertEquals("Modification times not equal", p1Stat.getModificationTime(),
-        p2Stat.getModificationTime());
-    assertEquals("pathname1 not equal", p1,
-        Path.getPathWithoutSchemeAndAuthority(p1Stat.getPath()));
-    assertEquals("pathname1 not equal", p2,
-            Path.getPathWithoutSchemeAndAuthority(p2Stat.getPath()));
+    /*
+     * Use accessTime and modificationTime as substitutes for INode to check
+     * for resolution to the same underlying file.
+     */
+    assertEquals(p1Stat.getAccessTime(), p2Stat.getAccessTime(),
+        "Access times not equal");
+    assertEquals(p1Stat.getModificationTime(), p2Stat.getModificationTime(),
+        "Modification times not equal");
+    assertEquals(p1, Path.getPathWithoutSchemeAndAuthority(p1Stat.getPath()),
+        "pathname1 not equal");
+    assertEquals(p2, Path.getPathWithoutSchemeAndAuthority(p2Stat.getPath()),
+        "pathname2 not equal");
   }
 
   /**
@@ -343,7 +340,7 @@
     }
 
     final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
-    assertEquals("expected 1 entry", fileStatuses.length, 1);
+    assertEquals(fileStatuses.length, 1, "expected 1 entry");
     assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java
index f6345a3..b017207 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java
@@ -18,14 +18,14 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * A JUnit test for checking if restarting DFS preserves integrity.
@@ -70,8 +70,8 @@
       // Here we restart the MiniDFScluster without formatting namenode
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build(); 
       FileSystem fs = cluster.getFileSystem();
-      assertTrue("Filesystem corrupted after restart.",
-                 files.checkFiles(fs, dir));
+      assertTrue(files.checkFiles(fs, dir),
+          "Filesystem corrupted after restart.");
 
       final FileStatus newrootstatus = fs.getFileStatus(rootpath);
       assertEquals(rootmtime, newrootstatus.getModificationTime());
@@ -94,8 +94,8 @@
       // the image written in parallel to both places did not get corrupted
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
       FileSystem fs = cluster.getFileSystem();
-      assertTrue("Filesystem corrupted after restart.",
-                 files.checkFiles(fs, dir));
+      assertTrue(files.checkFiles(fs, dir),
+          "Filesystem corrupted after restart.");
 
       final FileStatus newrootstatus = fs.getFileStatus(rootpath);
       assertEquals(rootmtime, newrootstatus.getModificationTime());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index a716335..d0dbd34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -55,13 +55,11 @@
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import static org.apache.hadoop.hdfs.server.namenode.ImageServlet.RECENT_IMAGE_CHECK_ENABLED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * This class tests rolling upgrade.
@@ -75,7 +73,7 @@
     if (success) {
       assertEquals(0, dfsadmin.run(args));
     } else {
-      Assert.assertTrue(dfsadmin.run(args) != 0);
+      Assertions.assertTrue(dfsadmin.run(args) != 0);
     }
   }
 
@@ -131,9 +129,9 @@
 
         // All directories created before upgrade, when upgrade in progress and
         // after upgrade finalize exists
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertTrue(dfs.exists(bar));
-        Assert.assertTrue(dfs.exists(baz));
+        Assertions.assertTrue(dfs.exists(foo));
+        Assertions.assertTrue(dfs.exists(bar));
+        Assertions.assertTrue(dfs.exists(baz));
 
         dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
         dfs.saveNamespace();
@@ -144,9 +142,9 @@
       cluster.restartNameNode();
       {
         final DistributedFileSystem dfs = cluster.getFileSystem();
-        Assert.assertTrue(dfs.exists(foo));
-        Assert.assertTrue(dfs.exists(bar));
-        Assert.assertTrue(dfs.exists(baz));
+        Assertions.assertTrue(dfs.exists(foo));
+        Assertions.assertTrue(dfs.exists(bar));
+        Assertions.assertTrue(dfs.exists(baz));
       }
     } finally {
       if(cluster != null) cluster.shutdown();
@@ -237,9 +235,9 @@
       final DistributedFileSystem dfs2 = cluster2.getFileSystem();
 
       // Check that cluster2 sees the edits made on cluster1
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertFalse(dfs2.exists(baz));
+      Assertions.assertTrue(dfs2.exists(foo));
+      Assertions.assertTrue(dfs2.exists(bar));
+      Assertions.assertFalse(dfs2.exists(baz));
 
       //query rolling upgrade in cluster2
       assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
@@ -249,9 +247,9 @@
       LOG.info("RESTART cluster 2");
       cluster2.restartNameNode();
       assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertTrue(dfs2.exists(baz));
+      Assertions.assertTrue(dfs2.exists(foo));
+      Assertions.assertTrue(dfs2.exists(bar));
+      Assertions.assertTrue(dfs2.exists(baz));
 
       //restart cluster with -upgrade should fail.
       try {
@@ -263,21 +261,21 @@
       LOG.info("RESTART cluster 2 again");
       cluster2.restartNameNode();
       assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertTrue(dfs2.exists(baz));
+      Assertions.assertTrue(dfs2.exists(foo));
+      Assertions.assertTrue(dfs2.exists(bar));
+      Assertions.assertTrue(dfs2.exists(baz));
 
       //finalize rolling upgrade
       final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
           RollingUpgradeAction.FINALIZE);
-      Assert.assertTrue(finalize.isFinalized());
+      Assertions.assertTrue(finalize.isFinalized());
 
       LOG.info("RESTART cluster 2 with regular startup option");
       cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
       cluster2.restartNameNode();
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertTrue(dfs2.exists(baz));
+      Assertions.assertTrue(dfs2.exists(foo));
+      Assertions.assertTrue(dfs2.exists(bar));
+      Assertions.assertTrue(dfs2.exists(baz));
     } finally {
       if (cluster2 != null) cluster2.shutdown();
     }
@@ -367,8 +365,8 @@
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 
     dfs.mkdirs(bar);
-    Assert.assertTrue(dfs.exists(foo));
-    Assert.assertTrue(dfs.exists(bar));
+    Assertions.assertTrue(dfs.exists(foo));
+    Assertions.assertTrue(dfs.exists(bar));
 
     //truncate a file
     final int newLength = ThreadLocalRandom.current().nextInt(data.length - 1)
@@ -386,8 +384,8 @@
     cluster.restartDataNode(dnprop, true);
 
     final DistributedFileSystem dfs = cluster.getFileSystem();
-    Assert.assertTrue(dfs.exists(foo));
-    Assert.assertFalse(dfs.exists(bar));
+    Assertions.assertTrue(dfs.exists(foo));
+    Assertions.assertFalse(dfs.exists(bar));
     AppendTestUtil.checkFullFile(dfs, file, data.length, data);
   }
 
@@ -414,7 +412,7 @@
       // the datanode should be down.
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
-      Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());
+      Assertions.assertFalse(dn.isDatanodeUp(), "DataNode should exit");
 
       // ping should fail.
       assertEquals(-1, dfsadmin.run(args1));
@@ -479,20 +477,20 @@
       // start rolling upgrade
       RollingUpgradeInfo info = dfs
           .rollingUpgrade(RollingUpgradeAction.PREPARE);
-      Assert.assertTrue(info.isStarted());
+      Assertions.assertTrue(info.isStarted());
       dfs.mkdirs(bar);
 
       queryForPreparation(dfs);
 
       // The NN should have a copy of the fsimage in case of rollbacks.
-      Assert.assertTrue(fsimage.hasRollbackFSImage());
+      Assertions.assertTrue(fsimage.hasRollbackFSImage());
 
       info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
-      Assert.assertTrue(info.isFinalized());
-      Assert.assertTrue(dfs.exists(foo));
+      Assertions.assertTrue(info.isFinalized());
+      Assertions.assertTrue(dfs.exists(foo));
 
       // Once finalized, there should be no more fsimage for rollbacks.
-      Assert.assertFalse(fsimage.hasRollbackFSImage());
+      Assertions.assertFalse(fsimage.hasRollbackFSImage());
 
       // Should have no problem in restart and replaying edits that include
       // the FINALIZE op.
@@ -533,10 +531,10 @@
       // start rolling upgrade
       RollingUpgradeInfo info = dfs
           .rollingUpgrade(RollingUpgradeAction.PREPARE);
-      Assert.assertTrue(info.isStarted());
+      Assertions.assertTrue(info.isStarted());
 
       info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
-      Assert.assertFalse(info.createdRollbackImages());
+      Assertions.assertFalse(info.createdRollbackImages());
 
       // restart other NNs
       for (int i = 1; i < nnCount; i++) {
@@ -546,7 +544,7 @@
       queryForPreparation(dfs);
 
       // The NN should have a copy of the fsimage in case of rollbacks.
-      Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage()
+      Assertions.assertTrue(dfsCluster.getNamesystem(0).getFSImage()
               .hasRollbackFSImage());
     } finally {
       if (cluster != null) {
@@ -624,11 +622,11 @@
       ruEdit.await();
       RollingUpgradeInfo info = dfs
           .rollingUpgrade(RollingUpgradeAction.PREPARE);
-      Assert.assertTrue(info.isStarted());
+      Assertions.assertTrue(info.isStarted());
       FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();
       queryForPreparation(dfs);
       // The NN should have a copy of the fsimage in case of rollbacks.
-      Assert.assertTrue(fsimage.hasRollbackFSImage());
+      Assertions.assertTrue(fsimage.hasRollbackFSImage());
     } finally {
       CheckpointFaultInjector.set(old);
       if (cluster != null) {
@@ -656,13 +654,13 @@
       // start rolling upgrade
       RollingUpgradeInfo info = dfs
           .rollingUpgrade(RollingUpgradeAction.PREPARE);
-      Assert.assertTrue(info.isStarted());
+      Assertions.assertTrue(info.isStarted());
 
       queryForPreparation(dfs);
 
       dfs.mkdirs(foo);
       long txid = dfs.rollEdits();
-      Assert.assertTrue(txid > 0);
+      Assertions.assertTrue(txid > 0);
 
       for(int i=1; i< nnCount; i++) {
         verifyNNCheckpoint(dfsCluster, txid, i);
@@ -688,7 +686,7 @@
       }
       Thread.sleep(1000);
     }
-    Assert.fail("new checkpoint does not exist");
+    Assertions.fail("new checkpoint does not exist");
   }
 
   static void queryForPreparation(DistributedFileSystem dfs) throws IOException,
@@ -704,7 +702,7 @@
     }
 
     if (retries >= 10) {
-      Assert.fail("Query return false");
+      Assertions.fail("Query return false");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeDowngrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeDowngrade.java
index 189b5f5..607e63c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeDowngrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeDowngrade.java
@@ -31,8 +31,8 @@
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 public class TestRollingUpgradeDowngrade {
 
@@ -64,7 +64,7 @@
       // start rolling upgrade
       RollingUpgradeInfo info = dfs
           .rollingUpgrade(RollingUpgradeAction.PREPARE);
-      Assert.assertTrue(info.isStarted());
+      Assertions.assertTrue(info.isStarted());
       dfs.mkdirs(bar);
 
       TestRollingUpgrade.queryForPreparation(dfs);
@@ -72,15 +72,15 @@
 
       dfsCluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
       // Once downgraded, there should be no more fsimage for rollbacks.
-      Assert.assertFalse(dfsCluster.getNamesystem(0).getFSImage()
+      Assertions.assertFalse(dfsCluster.getNamesystem(0).getFSImage()
           .hasRollbackFSImage());
       // shutdown NN1
       dfsCluster.shutdownNameNode(1);
       dfsCluster.transitionToActive(0);
 
       dfs = dfsCluster.getFileSystem(0);
-      Assert.assertTrue(dfs.exists(foo));
-      Assert.assertTrue(dfs.exists(bar));
+      Assertions.assertTrue(dfs.exists(foo));
+      Assertions.assertTrue(dfs.exists(bar));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
index b5ef5ee..766419f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
@@ -31,8 +31,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests rollback for rolling upgrade.
@@ -56,35 +56,35 @@
     List<File> finalizedEdits = storage.getFiles(
         NNStorage.NameNodeDirType.EDITS,
         NNStorage.getFinalizedEditsFileName(1, imageTxId));
-    Assert.assertTrue(fileExists(finalizedEdits));
+    Assertions.assertTrue(fileExists(finalizedEdits));
     List<File> inprogressEdits = storage.getFiles(
         NNStorage.NameNodeDirType.EDITS,
         NNStorage.getInProgressEditsFileName(imageTxId + 1));
     // For rollback case we will have an inprogress file for future transactions
-    Assert.assertTrue(fileExists(inprogressEdits));
+    Assertions.assertTrue(fileExists(inprogressEdits));
     if (trashEndTxId > 0) {
       List<File> trashedEdits = storage.getFiles(
           NNStorage.NameNodeDirType.EDITS,
           NNStorage.getFinalizedEditsFileName(imageTxId + 1, trashEndTxId)
               + ".trash");
-      Assert.assertTrue(fileExists(trashedEdits));
+      Assertions.assertTrue(fileExists(trashedEdits));
     }
     String imageFileName = trashEndTxId > 0 ? NNStorage
         .getImageFileName(imageTxId) : NNStorage
         .getRollbackImageFileName(imageTxId);
     List<File> imageFiles = storage.getFiles(
         NNStorage.NameNodeDirType.IMAGE, imageFileName);
-    Assert.assertTrue(fileExists(imageFiles));
+    Assertions.assertTrue(fileExists(imageFiles));
   }
 
   private void checkJNStorage(File dir, long discardStartTxId,
       long discardEndTxId) {
     File finalizedEdits = new File(dir, NNStorage.getFinalizedEditsFileName(1,
         discardStartTxId - 1));
-    Assert.assertTrue(finalizedEdits.exists());
+    Assertions.assertTrue(finalizedEdits.exists());
     File trashEdits = new File(dir, NNStorage.getFinalizedEditsFileName(
         discardStartTxId, discardEndTxId) + ".trash");
-    Assert.assertTrue(trashEdits.exists());
+    Assertions.assertTrue(trashEdits.exists());
   }
 
   @Test
@@ -103,7 +103,7 @@
 
       // start rolling upgrade
       dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
       dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
       // create new directory
@@ -125,10 +125,10 @@
       // make sure /foo is still there, but /bar is not
       INode fooNode = nn.getNamesystem().getFSDirectory()
           .getINode4Write(foo.toString());
-      Assert.assertNotNull(fooNode);
+      Assertions.assertNotNull(fooNode);
       INode barNode = nn.getNamesystem().getFSDirectory()
           .getINode4Write(bar.toString());
-      Assert.assertNull(barNode);
+      Assertions.assertNull(barNode);
 
       // check the details of NNStorage
       NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
@@ -165,7 +165,7 @@
 
       // start rolling upgrade
       dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
       dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
       // create new directory
@@ -176,8 +176,8 @@
       cluster.restartNameNode("-rollingUpgrade", "rollback");
       // make sure /foo is still there, but /bar is not
       dfs = cluster.getFileSystem();
-      Assert.assertTrue(dfs.exists(foo));
-      Assert.assertFalse(dfs.exists(bar));
+      Assertions.assertTrue(dfs.exists(foo));
+      Assertions.assertFalse(dfs.exists(bar));
 
       // check storage in JNs
       for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
@@ -223,7 +223,7 @@
 
       // start rolling upgrade
       RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
-      Assert.assertTrue(info.isStarted());
+      Assertions.assertTrue(info.isStarted());
 
       // create new directory
       dfs.mkdirs(bar);
@@ -234,9 +234,9 @@
 
       // If the query returns true, both active and the standby NN should have
       // rollback fsimage ready.
-      Assert.assertTrue(dfsCluster.getNameNode(0).getFSImage()
+      Assertions.assertTrue(dfsCluster.getNameNode(0).getFSImage()
           .hasRollbackFSImage());
-      Assert.assertTrue(dfsCluster.getNameNode(1).getFSImage()
+      Assertions.assertTrue(dfsCluster.getNameNode(1).getFSImage()
           .hasRollbackFSImage());
       
       // rollback NN0
@@ -248,8 +248,8 @@
 
       // make sure /foo is still there, but /bar is not
       dfs = dfsCluster.getFileSystem(0);
-      Assert.assertTrue(dfs.exists(foo));
-      Assert.assertFalse(dfs.exists(bar));
+      Assertions.assertTrue(dfs.exists(foo));
+      Assertions.assertFalse(dfs.exists(bar));
 
       // check the details of NNStorage
       NNStorage storage = dfsCluster.getNamesystem(0).getFSImage()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 3e9231f..88aeaa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -20,10 +20,7 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -56,9 +53,9 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.util.function.Supplier;
 
@@ -76,7 +73,7 @@
   DistributedFileSystem dfs;
   private static final String NN_METRICS = "NameNodeActivity";
 
-  @Before
+  @BeforeEach
   public void startUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -88,7 +85,7 @@
     dfs = (DistributedFileSystem)fs;
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -132,9 +129,9 @@
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
-    
-    assertTrue("No datanode is started. Should be in SafeMode", 
-               dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+
+    assertTrue(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET),
+        "No datanode is started. Should be in SafeMode");
     
     // manually set safemode.
     dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -148,10 +145,10 @@
       Thread.sleep(2000);
     } catch (InterruptedException ignored) {}
 
-    assertTrue("should still be in SafeMode",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-    assertFalse("should not be in SafeMode", 
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
+    assertTrue(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET),
+        "should still be in SafeMode");
+    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE),
+        "should not be in SafeMode");
   }
 
   /**
@@ -206,9 +203,9 @@
         + NEWLINE + "The minimum number of live datanodes is not required. "
         + "Safe mode will be turned off automatically once the thresholds have "
         + "been reached.", status);
-    assertFalse("Mis-replicated block queues should not be initialized " +
-        "until threshold is crossed",
-        NameNodeAdapter.safeModeInitializedReplQueues(nn));
+    assertFalse(NameNodeAdapter.safeModeInitializedReplQueues(nn),
+        "Mis-replicated block queues should not be initialized " +
+        "until threshold is crossed");
     
     LOG.info("Restarting one DataNode");
     cluster.restartDataNode(dnprops.remove(0));
@@ -224,8 +221,8 @@
     }, 10, 10000);
 
     final long safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
-    assertTrue("Expected first block report to make some blocks safe.", safe > 0);
-    assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
+    assertTrue(safe > 0, "Expected first block report to make some blocks safe.");
+    assertTrue(safe < 15, "Did not expect first block report to make all blocks safe.");
 
     assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
 
@@ -310,8 +307,8 @@
   public void testSafeModeExceptionText() throws Exception {
     final Path file1 = new Path("/file1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
-    assertTrue("Could not enter SM",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
+    assertTrue(dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER),
+        "Could not enter SM");
     try {
       FSRun fsRun = new FSRun() {
         @Override
@@ -341,8 +338,8 @@
 
     assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
     DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
-    assertTrue("Could not enter SM", 
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
+    assertTrue(dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER),
+        "Could not enter SM");
 
     runFsFun("Set quota while in SM", new FSRun() { 
       @Override
@@ -491,8 +488,8 @@
       // expected
     }
 
-    assertFalse("Could not leave SM",
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
+    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE),
+        "Could not leave SM");
   }
 
   /**
@@ -510,10 +507,10 @@
     fs = cluster.getFileSystem();
 
     String tipMsg = cluster.getNamesystem().getSafemode();
-    assertTrue("Safemode tip message doesn't look right: " + tipMsg,
-      tipMsg.contains("The number of live datanodes 0 needs an additional " +
+    assertTrue(
+      tipMsg.contains("The number of live datanodes 0 needs an additional " +
                       "1 live datanodes to reach the minimum number 1." +
-                      NEWLINE + "Safe mode will be turned off automatically"));
+                      NEWLINE + "Safe mode will be turned off automatically"), "Safemode tip message doesn't look right: " + tipMsg);
 
     // Start a datanode
     cluster.startDataNodes(conf, 1, true, null, null);
@@ -537,11 +534,11 @@
 
     // Enter safemode.
     dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-    assertTrue("State was expected to be in safemode.", dfs.isInSafeMode());
+    assertTrue(dfs.isInSafeMode(), "State was expected to be in safemode.");
 
     // Exit safemode.
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-    assertFalse("State was expected to be out of safemode.", dfs.isInSafeMode());
+    assertFalse(dfs.isInSafeMode(), "State was expected to be out of safemode.");
   }
   
   @Test
@@ -562,11 +559,11 @@
 
       // manually set safemode.
       dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      assertTrue("should still be in SafeMode", namenode.isInSafeMode());
+      assertTrue(namenode.isInSafeMode(), "should still be in SafeMode");
       // getBlock locations should still work since block locations exists
       checkGetBlockLocationsWorks(fs, file1);
       dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      assertFalse("should not be in SafeMode", namenode.isInSafeMode());
+      assertFalse(namenode.isInSafeMode(), "should not be in SafeMode");
       
       
       // Now 2nd part of the tests where there aren't block locations
@@ -580,23 +577,23 @@
       System.out.println("Restarted cluster with just the NameNode");
       
       namenode = cluster.getNameNode();
-      
-      assertTrue("No datanode is started. Should be in SafeMode", 
-                 namenode.isInSafeMode());
+
+      assertTrue(namenode.isInSafeMode(),
+          "No datanode is started. Should be in SafeMode");
       FileStatus stat = fs.getFileStatus(file1);
       try {
         fs.getFileBlockLocations(stat, 0, 1000);
-        assertTrue("Should have got safemode exception", false);
+        assertTrue(false, "Should have got safemode exception");
       } catch (SafeModeException e) {
         // as expected 
       } catch (RemoteException re) {
         if (!re.getClassName().equals(SafeModeException.class.getName()))
-          assertTrue("Should have got safemode exception", false);   
+          assertTrue(false, "Should have got safemode exception");
       }
 
 
-      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);      
-      assertFalse("Should not be in safemode", namenode.isInSafeMode());
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      assertFalse(namenode.isInSafeMode(), "Should not be in safemode");
       checkGetBlockLocationsWorks(fs, file1);
 
     } finally {
@@ -610,9 +607,9 @@
     try {  
       fs.getFileBlockLocations(stat, 0, 1000);
     } catch (SafeModeException e) {
-      assertTrue("Should have not got safemode exception", false);
+      assertTrue(false, "Should have not got safemode exception");
     } catch (RemoteException re) {
-      assertTrue("Should have not got remote exception", false);
+      assertTrue(false, "Should have not got remote exception");
     }    
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 74b2482c..92815a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -27,18 +27,16 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestSafeModeWithStripedFile {
 
@@ -59,7 +57,7 @@
     return StripedFileTestUtil.getDefaultECPolicy();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     ecPolicy = getEcPolicy();
     dataBlocks = (short) ecPolicy.getNumDataUnits();
@@ -78,7 +76,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
index fcf1333..32e1116 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSecureEncryptionZoneWithKMS.java
@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE;
@@ -64,12 +63,12 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -132,7 +131,7 @@
   @Rule
   public Timeout timeout = new Timeout(120000);
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     baseDir = getTestDir();
     FileUtil.fullyDelete(baseDir);
@@ -146,8 +145,8 @@
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
         baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     File keytabFile = new File(baseDir, "test.keytab");
     keytab = keytabFile.getAbsolutePath();
@@ -231,7 +230,7 @@
     miniKMS.start();
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -243,7 +242,7 @@
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     // Start MiniDFS Cluster
     baseConf
@@ -271,7 +270,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
index 9dd2987..4d45235 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -43,9 +43,9 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index 74cde2a..8f3c863 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -40,9 +38,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.MockitoUtil;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * This class tests the access time on files.
@@ -107,7 +104,7 @@
                                                    cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    assertEquals(numDatanodes, info.length, "Number of Datanodes ");
     FileSystem fileSys = cluster.getFileSystem();
     int replicas = 1;
     assertTrue(fileSys instanceof DistributedFileSystem);
@@ -184,10 +181,10 @@
       fileSys.setTimes(dir1, mtime4, atime4);
       // check new modification time on file
       stat = fileSys.getFileStatus(dir1);
-      assertTrue("Not matching the modification times", mtime4 == stat
-          .getModificationTime());
-      assertTrue("Not matching the access times", atime4 == stat
-          .getAccessTime());
+      assertTrue(mtime4 == stat.getModificationTime(),
+          "Not matching the modification times");
+      assertTrue(atime4 == stat.getAccessTime(),
+          "Not matching the access times");
 
       Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
       try {
@@ -247,7 +244,7 @@
                                                      cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    assertEquals(numDatanodes, info.length, "Number of Datanodes ");
     FileSystem fileSys = cluster.getFileSystem();
     assertTrue(fileSys instanceof DistributedFileSystem);
 
@@ -339,7 +336,7 @@
       DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);
 
       fs.setTimes(p, -1L, 123456L);
-      Assert.assertEquals(123456L, fs.getFileStatus(p).getAccessTime());
+      Assertions.assertEquals(123456L, fs.getFileStatus(p).getAccessTime());
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
index 497d450..06ba061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -45,7 +45,7 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
     FileSystem fs = cluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
 
     try {
       Path root = TestDFSShell.mkdir(fs, 
@@ -60,7 +60,7 @@
         try {
           assertEquals(0, shell.run(args));
         } catch (Exception e) {
-          assertTrue("-setrep " + e, false);
+          assertTrue(false, "-setrep " + e);
         }
       }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
index 6983cde..f40ee74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.Random;
@@ -30,7 +30,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the creation of files with block-size
@@ -44,9 +44,9 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx) + " differs. expected " +
+          expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }
@@ -55,7 +55,7 @@
       throws IOException {
     BlockLocation[] locations = fileSys.getFileBlockLocations(
         fileSys.getFileStatus(name), 0, fileSize);
-    assertEquals("Number of blocks", fileSize, locations.length);
+    assertEquals(fileSize, locations.length, "Number of blocks");
     FSDataInputStream stm = fileSys.open(name);
     byte[] expected = new byte[fileSize];
     if (simulatedStorage) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
index 32ac298..558b880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
@@ -26,11 +26,11 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.apache.hadoop.util.ChunkedArrayList;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class includes end-to-end tests for snapshot related FsShell and
@@ -42,7 +42,7 @@
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 3);
@@ -51,7 +51,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdown() throws IOException{
     if(fs != null){
       fs.close();
@@ -61,7 +61,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     fs.mkdirs(new Path("/sub1"));
     fs.mkdirs(new Path("/Fully/QPath"));
@@ -70,7 +70,7 @@
     fs.mkdirs(new Path("/sub1/sub1sub2"));
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (fs.exists(new Path("/sub1"))) {
       if (fs.exists(new Path("/sub1/.snapshot"))) {
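Note: the lifecycle annotations map one-to-one: @BeforeClass -> @BeforeAll,
@AfterClass -> @AfterAll, @Before -> @BeforeEach and @After -> @AfterEach; the
*All methods must remain static under the default per-method lifecycle. A
hypothetical sketch of the converted shape:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    public class LifecycleExample {
      @BeforeAll
      public static void clusterSetUp() { /* start shared fixture once */ }

      @BeforeEach
      public void setUp() { /* prepare per-test state */ }

      @Test
      public void testSomething() { }

      @AfterEach
      public void tearDown() { /* per-test cleanup */ }

      @AfterAll
      public static void clusterShutdown() { /* shared fixture shutdown */ }
    }
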
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index 3056b43..51af5273 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -30,11 +30,11 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -87,7 +87,7 @@
     }
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void startUpCluster() throws IOException {
     // Set short retry timeouts so this test runs faster
     CONF.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
@@ -100,20 +100,20 @@
     cluster = qjmhaCluster.getDfsCluster();
   }
 
-  @Before
+  @BeforeEach
   public void before() throws IOException, URISyntaxException {
     dfs = HATestUtil.configureObserverReadFs(
         cluster, CONF, ORPPwithAlignmentContexts.class, true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutDownCluster() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
     }
   }
 
-  @After
+  @AfterEach
   public void after() throws IOException {
     killWorkers();
     cluster.transitionToStandby(1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyPermissionSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyPermissionSettings.java
index 81f9126..37b6417 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyPermissionSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStoragePolicyPermissionSettings.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
 import java.io.IOException;
 import java.lang.reflect.Field;
@@ -34,9 +34,9 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestStoragePolicyPermissionSettings {
 
@@ -51,7 +51,7 @@
   private static UserGroupInformation nonAdmin;
   private static UserGroupInformation admin;
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
@@ -65,7 +65,7 @@
         new String[]{"supergroup"});
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdown() throws IOException {
     if (fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java
index a00f67a..de09032 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java
@@ -26,9 +26,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 import java.io.IOException;
@@ -38,10 +38,7 @@
 import java.util.List;
 import java.util.Random;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests append on erasure coded file.
@@ -68,7 +65,7 @@
   private Path dir = new Path("/TestFileAppendStriped");
   private HdfsConfiguration conf = new HdfsConfiguration();
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).build();
@@ -78,7 +75,7 @@
     dfs.setErasureCodingPolicy(dir, null);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -143,7 +140,7 @@
 
     RemoteIterator<OpenFileEntry> listOpenFiles = dfs
         .listOpenFiles(EnumSet.copyOf(types), file.toString());
-    assertFalse("No file should be open after append failure",
-        listOpenFiles.hasNext());
+    assertFalse(listOpenFiles.hasNext(),
+        "No file should be open after append failure");
   }
 }
\ No newline at end of file
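Note: a few files in this patch replace the individual org.junit.Assert static imports
with a single org.junit.jupiter.api.Assertions.* wildcard; star imports are usually
flagged by Hadoop's checkstyle rules, so the explicit form below is the safer
equivalent (illustrative only):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertTrue;
    import static org.junit.jupiter.api.Assertions.fail;
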
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
index 7699e3e..11de3cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithEncryptionZones.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
@@ -36,9 +36,9 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
@@ -78,7 +78,7 @@
         new Path(testRootDir.toString(), "test.jks").toUri();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     fsHelper = new FileSystemTestHelper();
@@ -114,7 +114,7 @@
         .getProvider());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -178,8 +178,8 @@
         // Delete /zones/zone1 should not succeed as current user is not admin
         String[] argv = new String[]{"-rm", "-r", zone1.toString()};
         int res = ToolRunner.run(shell, argv);
-        assertEquals("Non-admin could delete an encryption zone with multiple" +
-            " users : " + zone1, 1, res);
+        assertEquals(1, res, "Non-admin could delete an encryption zone " +
+            "with multiple users : " + zone1);
         return null;
       }
     });
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
index 94b9c17..a743626 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.apache.hadoop.fs.CommonConfigurationKeys
     .IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
@@ -51,10 +49,10 @@
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.FixMethodOrder;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.runners.MethodSorters;
 
 import java.io.File;
@@ -118,7 +116,7 @@
     return file;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     baseDir = getTestDir();
     FileUtil.fullyDelete(baseDir);
@@ -132,8 +130,8 @@
     SecurityUtil.setAuthenticationMethod(UserGroupInformation
         .AuthenticationMethod.KERBEROS, baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     File keytabFile = new File(baseDir, "test.keytab");
     keytab = keytabFile.getAbsolutePath();
@@ -227,7 +225,7 @@
     shell = new FsShell(clientConf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
@@ -267,16 +265,16 @@
         "/" + CURRENT);
     String trashPath = trashDir.toString() + encFile1.toString();
     Path deletedFile = verifyTrashLocationWithShellDelete(encFile1);
-    assertEquals("Deleted file not at the expected trash location: " +
-        trashPath, trashPath, deletedFile.toUri().getPath());
+    assertEquals(trashPath, deletedFile.toUri().getPath(),
+        "Deleted file not at the expected trash location: " + trashPath);
 
     //Verify Trash checkpoint outside the encryption zone when the whole
     // encryption zone is deleted and moved
     trashPath = fs.getHomeDirectory().toUri().getPath() + "/" + fs
         .TRASH_PREFIX + "/" + CURRENT + zone2;
     Path deletedDir = verifyTrashLocationWithShellDelete(zone2);
-    assertEquals("Deleted zone not at the expected trash location: " +
-        trashPath, trashPath, deletedDir.toUri().getPath());
+    assertEquals(trashPath, deletedDir.toUri().getPath(),
+        "Deleted zone not at the expected trash location: " + trashPath);
   }
 
   @Test
@@ -344,16 +342,16 @@
     //Delete empty directory with -r option
     String[] argv1 = new String[]{"-rm", "-r", zone1.toString()};
     int res = ToolRunner.run(shell, argv1);
-    assertEquals("rm failed", 0, res);
-    assertTrue("Empty directory not deleted even with -r : " + trashDir1, fs
-        .exists(trashDir1));
+    assertEquals(0, res, "rm failed");
+    assertTrue(fs.exists(trashDir1),
+        "Empty directory not deleted even with -r : " + trashDir1);
 
     //Delete empty directory without -r option
     String[] argv2 = new String[]{"-rm", zone2.toString()};
     res = ToolRunner.run(shell, argv2);
-    assertEquals("rm on empty directory did not fail", 1, res);
-    assertTrue("Empty directory deleted without -r : " + trashDir2, !fs.exists(
-        trashDir2));
+    assertEquals(1, res, "rm on empty directory did not fail");
+    assertTrue(!fs.exists(trashDir2),
+        "Empty directory deleted without -r : " + trashDir2);
   }
 
   @Test
@@ -371,12 +369,12 @@
 
     String[] argv = new String[]{"-rm", "-r", encFile1.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("rm failed", 0, res);
+    assertEquals(0, res, "rm failed");
 
     String[] argvDeleteTrash = new String[]{"-rm", "-r", trashFile.toString()};
     int resDeleteTrash = ToolRunner.run(shell, argvDeleteTrash);
-    assertEquals("rm failed", 0, resDeleteTrash);
-    assertFalse("File deleted from Trash : " + trashFile, fs.exists(trashFile));
+    assertEquals(0, resDeleteTrash, "rm failed");
+    assertFalse(fs.exists(trashFile), "File deleted from Trash : " + trashFile);
   }
 
   @Test
@@ -393,15 +391,15 @@
         encFile1);
     String[] argv = new String[]{"-rm", "-r", encFile1.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("rm failed", 0, res);
+    assertEquals(0, res, "rm failed");
 
-    assertTrue("File not in trash : " + trashFile, fs.exists(trashFile));
+      assertTrue(fs.exists(trashFile), "File not in trash : " + trashFile);
     cluster.restartNameNode(0);
     cluster.waitActive();
     fs = cluster.getFileSystem();
 
-    assertTrue("On Namenode restart, file deleted from trash : " +
-        trashFile, fs.exists(trashFile));
+      assertTrue(fs.exists(trashFile), "On Namenode restart, file deleted from trash : " +
+              trashFile);
   }
 
   private Path verifyTrashLocationWithShellDelete(Path path)
@@ -410,8 +408,8 @@
     final Path trashFile = new Path(shell.getCurrentTrashDir(path) + "/" +
         path);
     File deletedFile = new File(String.valueOf(trashFile));
-    assertFalse("File already present in Trash before delete", deletedFile
-        .exists());
+    assertFalse(deletedFile.exists(),
+        "File already present in Trash before delete");
 
     DFSTestUtil.verifyDelete(shell, fs, path, trashFile, true);
     return trashFile;
@@ -420,23 +418,23 @@
   private void verifyTrashExpunge(List<Path> trashFiles) throws Exception {
     String[] argv = new String[]{"-expunge"};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("expunge failed", 0, res);
+      assertEquals(0, res, "expunge failed");
 
     for (Path trashFile : trashFiles) {
-      assertFalse("File exists in trash after expunge : " + trashFile, fs
-          .exists(trashFile));
+      assertFalse(fs.exists(trashFile),
+          "File exists in trash after expunge : " + trashFile);
     }
   }
 
   private void verifyDeleteWithSkipTrash(Path path) throws Exception {
-    assertTrue(path + " file does not exist", fs.exists(path));
+    assertTrue(fs.exists(path), path + " file does not exist");
 
     final Path trashFile = new Path(shell.getCurrentTrashDir(path) + "/" +
         path);
 
     String[] argv = new String[]{"-rm", "-r", "-skipTrash", path.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("rm failed", 0, res);
-    assertFalse("File in trash even with -skipTrash", fs.exists(trashFile));
+    assertEquals(0, res, "rm failed");
+    assertFalse(fs.exists(trashFile), "File in trash even with -skipTrash");
   }
 }
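Note: the Jupiter assertions also accept a Supplier<String> message, which defers the
string concatenation used heavily in these trash tests until an assertion actually
fails. A hypothetical sketch (file name and path are illustrative):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.junit.jupiter.api.Test;

    public class LazyMessageExample {
      @Test
      public void testLazyFailureMessage() throws Exception {
        Path trashFile = Files.createTempFile("trash", ".tmp");
        // The lambda is evaluated only when the assertion fails, so the
        // passing path never pays for the concatenation.
        assertTrue(Files.exists(trashFile),
            () -> "File not in trash : " + trashFile);
      }
    }
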
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
index 4f33ce7..2efe0d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -27,18 +27,18 @@
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
-import org.junit.Assert;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test unset and change directory's erasure coding policy.
@@ -62,7 +62,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
@@ -78,7 +78,7 @@
     DFSTestUtil.enableAllECPolicies(fs);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -114,18 +114,18 @@
     // ec_file should has EC policy
     ErasureCodingPolicy tempEcPolicy =
         fs.getErasureCodingPolicy(ecFilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Erasure coding policy mismatch!");
 
     // rep_file should not have EC policy
     tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
-    Assert.assertNull("Replicate file should not have erasure coding policy!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Replicate file should not have erasure coding policy!");
 
     // Directory should not return erasure coding policy
     tempEcPolicy = fs.getErasureCodingPolicy(dirPath);
-    Assert.assertNull("Directory should no have erasure coding policy set!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Directory should not have erasure coding policy set!");
 
     fs.delete(dirPath, true);
   }
@@ -159,19 +159,19 @@
     // ec_6_3_file should has RS-6-3 EC policy
     ErasureCodingPolicy tempEcPolicy =
         fs.getErasureCodingPolicy(ec63FilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Erasure coding policy mismatch!");
 
     // ec_3_2_file should have RS-3-2 policy
     tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ec32Policy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ec32Policy.getName()),
+        "Erasure coding policy mismatch!");
 
     // Child directory should have RS-3-2 policy
     tempEcPolicy = fs.getErasureCodingPolicy(childDir);
-    Assert.assertTrue(
-        "Directory should have erasure coding policy set!",
-        tempEcPolicy.getName().equals(ec32Policy.getName()));
+    Assertions.assertTrue(
+        tempEcPolicy.getName().equals(ec32Policy.getName()),
+        "Directory should have erasure coding policy set!");
 
     // Unset EC policy on child directory
     fs.unsetErasureCodingPolicy(childDir);
@@ -179,14 +179,14 @@
 
     // ec_6_3_file_2 should have RS-6-3 policy
     tempEcPolicy = fs.getErasureCodingPolicy(ec63FilePath2);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Erasure coding policy mismatch!");
 
     // Child directory should have RS-6-3 policy now
     tempEcPolicy = fs.getErasureCodingPolicy(childDir);
-    Assert.assertTrue(
-        "Directory should have erasure coding policy set!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(
+        tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Directory should have erasure coding policy set!");
 
     fs.delete(parentDir, true);
   }
@@ -218,18 +218,18 @@
     // ec_file should has EC policy set
     ErasureCodingPolicy tempEcPolicy =
         fs.getErasureCodingPolicy(ecFilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Erasure coding policy mismatch!");
 
     // rep_file should not have EC policy set
     tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
-    Assert.assertNull("Replicate file should not have erasure coding policy!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Replicate file should not have erasure coding policy!");
 
     // Directory should not return erasure coding policy
     tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
-    Assert.assertNull("Directory should not have erasure coding policy set!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Directory should not have erasure coding policy set!");
 
     fs.delete(rootPath, true);
   }
@@ -263,19 +263,19 @@
     // ec_6_3_file should has RS-6-3 ec policy set
     ErasureCodingPolicy tempEcPolicy =
         fs.getErasureCodingPolicy(ec63FilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Erasure coding policy mismatch!");
 
     // ec_3_2_file should have RS-3-2 policy
     tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ec32Policy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ec32Policy.getName()),
+        "Erasure coding policy mismatch!");
 
     // Root directory should have RS-3-2 policy
     tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
-    Assert.assertTrue(
-        "Directory should have erasure coding policy!",
-        tempEcPolicy.getName().equals(ec32Policy.getName()));
+    Assertions.assertTrue(
+        tempEcPolicy.getName().equals(ec32Policy.getName()),
+        "Directory should have erasure coding policy!");
 
     fs.delete(rootPath, true);
   }
@@ -302,21 +302,21 @@
     // ec_file should has EC policy set
     ErasureCodingPolicy tempEcPolicy =
         fs.getErasureCodingPolicy(ecFilePath);
-    Assert.assertTrue("Erasure coding policy mismatch!",
-        tempEcPolicy.getName().equals(ecPolicy.getName()));
+    Assertions.assertTrue(tempEcPolicy.getName().equals(ecPolicy.getName()),
+        "Erasure coding policy mismatch!");
 
     // rep_file should not have EC policy set
     tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
-    Assert.assertNull("Replicate file should not have erasure coding policy!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Replicate file should not have erasure coding policy!");
     tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath2);
-    Assert.assertNull("Replicate file should not have erasure coding policy!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Replicate file should not have erasure coding policy!");
 
     // Directory should not return erasure coding policy
     tempEcPolicy = fs.getErasureCodingPolicy(ecDirPath);
-    Assert.assertNull("Directory should not have erasure coding policy set!",
-        tempEcPolicy);
+    Assertions.assertNull(tempEcPolicy,
+        "Directory should not have erasure coding policy set!");
 
     fs.delete(ecDirPath, true);
   }
@@ -382,10 +382,10 @@
   @Test
   public void testUnsetEcPolicyInEditLog() throws IOException {
     fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
-    Assert.assertEquals(ecPolicy, fs.getErasureCodingPolicy(new Path("/")));
+    Assertions.assertEquals(ecPolicy, fs.getErasureCodingPolicy(new Path("/")));
     fs.getClient().unsetErasureCodingPolicy("/");
 
     cluster.restartNameNode(true);
-    Assert.assertNull(fs.getErasureCodingPolicy(new Path("/")));
+    Assertions.assertNull(fs.getErasureCodingPolicy(new Path("/")));
   }
 }
\ No newline at end of file
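Note: this class still registers a JUnit 4 Timeout rule via @Rule; the Jupiter engine
does not apply JUnit 4 rules by default (junit-jupiter-migrationsupport can bridge a
few of them), and its native replacement is the @Timeout annotation or assertTimeout.
A hypothetical equivalent, assuming the rule is eventually replaced:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // A class-level @Timeout applies to every test method, similar to a
    // global Timeout rule of 300 seconds.
    @Timeout(value = 300, unit = TimeUnit.SECONDS)
    public class TimeoutExample {
      @Test
      public void testCompletesWithinTimeout() {
        // test body
      }
    }
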
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java
index da0cb59..ebaa3e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystem.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.net.URI;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java
index 810c4cb..c26e107 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java
@@ -26,10 +26,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
@@ -41,7 +41,7 @@
   private static String defaultWorkingDirectory;
   private static Configuration conf = new HdfsConfiguration();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException {
     final File basedir = GenericTestUtils.getRandomizedTestDir();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -53,7 +53,7 @@
         "/user/" + UserGroupInformation.getCurrentUser().getShortUserName();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName());
     URI defaultFSURI =
@@ -65,7 +65,7 @@
     fs = FileSystem.get(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownAfter() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java
index 1e66252..1c74afd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemWithMountLinks.java
@@ -25,8 +25,8 @@
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.fs.viewfs.TestViewFileSystemOverloadSchemeWithHdfsScheme;
 import org.apache.hadoop.fs.viewfs.ViewFsTestSetup;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 import java.net.URI;
@@ -144,14 +144,14 @@
       dst = new Path("/InternalDirDoesNotExistInFallback/file");
       fs.create(src).close();
       // If fallback does not have same structure as internal, rename will fail.
-      Assert.assertFalse(fs.rename(src, dst));
+      Assertions.assertFalse(fs.rename(src, dst));
     }
   }
 
   private void verifyRename(FileSystem fs, Path src, Path dst)
       throws IOException {
     fs.rename(src, dst);
-    Assert.assertFalse(fs.exists(src));
-    Assert.assertTrue(fs.exists(dst));
+    Assertions.assertFalse(fs.exists(src));
+    Assertions.assertTrue(fs.exists(dst));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
index e159914..6d43ea9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
@@ -26,10 +26,10 @@
 import org.apache.hadoop.hdfs.server.datanode.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.*;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
index 3a9065a..1054713 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
@@ -32,10 +32,10 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestWriteRead {
 
@@ -67,7 +67,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestWriteRead.class);
 
-  @Before
+  @BeforeEach
   public void initJunitModeTest() throws Exception {
     LOG.info("initJunitModeTest");
 
@@ -85,7 +85,7 @@
     mfs.mkdirs(rootdir);
   }
 
-  @After
+  @AfterEach
   public void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -113,7 +113,7 @@
     // need to run long enough to fail: takes 25 to 35 seec on Mac
     int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE, rdBeginPos);
     LOG.info("Summary status from test1: status= " + stat);
-    Assert.assertEquals(0, stat);
+    Assertions.assertEquals(0, stat);
   }
 
   /** Junit Test position read while writing. */
@@ -123,7 +123,7 @@
     positionReadOption = true;   // position read
     long rdBeginPos = 0;
     int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE, rdBeginPos);
-    Assert.assertEquals(0, stat);
+    Assertions.assertEquals(0, stat);
   }
 
   /** Junit Test position read of the current block being written. */
@@ -135,7 +135,7 @@
     long rdBeginPos = blockSize+1;
     int numTimes=5;
     int stat = testWriteAndRead(fname, numTimes, wrChunkSize, rdBeginPos);
-    Assert.assertEquals(0, stat);
+    Assertions.assertEquals(0, stat);
   }
    
   // equivalent of TestWriteRead1
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index e1f7524..0f62121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -32,10 +32,10 @@
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -72,7 +72,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
@@ -83,7 +83,7 @@
         ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -322,9 +322,9 @@
     }
     try {
       fs.concat(target, srcs);
-      Assert.fail("non-ec file shouldn't concat with ec file");
+      Assertions.fail("non-ec file shouldn't concat with ec file");
     } catch (RemoteException e){
-      Assert.assertTrue(e.getMessage()
+      Assertions.assertTrue(e.getMessage()
           .contains("have different erasure coding policy"));
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index 566f91b..3eb192d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -25,9 +25,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
 import org.slf4j.event.Level;
 
 import java.io.IOException;
@@ -71,7 +71,7 @@
 
   // Test writing file with some Datanodes failure
   // TODO: enable this test after HDFS-8704 and HDFS-9040
-  @Ignore
+  @Disabled
   @Test(timeout = 300000)
   public void testWriteStripedFileWithDNFailure() throws IOException {
     for (int fileLength : fileLengths) {
@@ -125,10 +125,10 @@
 
     int[] dataDNFailureIndices = StripedFileTestUtil.randomArray(0, dataBlocks,
         dataDNFailureNum);
-    Assert.assertNotNull(dataDNFailureIndices);
+    Assertions.assertNotNull(dataDNFailureIndices);
     int[] parityDNFailureIndices = StripedFileTestUtil.randomArray(dataBlocks,
         dataBlocks + parityBlocks, parityDNFailureNum);
-    Assert.assertNotNull(parityDNFailureIndices);
+    Assertions.assertNotNull(parityDNFailureIndices);
 
     int[] failedDataNodes = new int[dataDNFailureNum + parityDNFailureNum];
     System.arraycopy(dataDNFailureIndices, 0, failedDataNodes,
@@ -150,7 +150,7 @@
 
     // make sure the expected number of Datanode have been killed
     int dnFailureNum = dataDNFailureNum + parityDNFailureNum;
-    Assert.assertEquals(cluster.getDataNodes().size(), numDNs - dnFailureNum);
+    Assertions.assertEquals(cluster.getDataNodes().size(), numDNs - dnFailureNum);
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
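Note: @Ignore maps to Jupiter's @Disabled, but @Disabled is only honored by the
Jupiter engine, and the timeout attribute of the JUnit 4 @Test has no Jupiter
counterpart; the usual replacements are @Timeout or assertTimeout. A hypothetical,
all-Jupiter version of a disabled, time-bounded test:

    import static org.junit.jupiter.api.Assertions.assertTimeout;

    import java.time.Duration;

    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    public class DisabledTimeoutExample {
      @Disabled("enable after HDFS-8704 and HDFS-9040")
      @Test
      public void testWriteWithDataNodeFailure() {
        // assertTimeout fails the test if the body takes longer than the bound.
        assertTimeout(Duration.ofMinutes(5), () -> {
          // long-running test body
        });
      }
    }
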
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
index 826299b..7763601 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
index fd9963e..be86953 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
@@ -24,7 +24,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
 
 import java.io.File;
 import java.io.IOException;
@@ -62,13 +62,11 @@
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.hamcrest.CoreMatchers;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
@@ -85,13 +83,13 @@
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  @Before
+  @BeforeEach
   public void init() {
     DomainSocket.disableBindPathValidation();
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    org.junit.jupiter.api.Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     DFSInputStream.tcpReadsDisabledForTesting = false;
     BlockReaderFactory.createShortCircuitReplicaInfoCallback = null;
@@ -145,7 +143,7 @@
     byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    Assertions.assertTrue(Arrays.equals(contents, expected));
     cluster.shutdown();
     sockDir.close();
   }
@@ -198,7 +196,7 @@
         public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
           Uninterruptibles.awaitUninterruptibly(latch);
           if (!creationIsBlocked.compareAndSet(true, false)) {
-            Assert.fail("there were multiple calls to "
+            Assertions.fail("there were multiple calls to "
                 + "createShortCircuitReplicaInfo.  Only one was expected.");
           }
           return null;
@@ -222,10 +220,10 @@
       public void run() {
         try {
           byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
-          Assert.assertFalse(creationIsBlocked.get());
+          Assertions.assertFalse(creationIsBlocked.get());
           byte expected[] = DFSTestUtil.
               calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-          Assert.assertTrue(Arrays.equals(contents, expected));
+          Assertions.assertTrue(Arrays.equals(contents, expected));
         } catch (Throwable e) {
           LOG.error("readerRunnable error", e);
           testFailed.set(true);
@@ -244,7 +242,7 @@
     }
     cluster.shutdown();
     sockDir.close();
-    Assert.assertFalse(testFailed.get());
+    Assertions.assertFalse(testFailed.get());
   }
 
   /**
@@ -303,11 +301,11 @@
           try {
             blockReader = BlockReaderTestUtil.getBlockReader(
                 cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
-            Assert.fail("expected getBlockReader to fail the first time.");
+            Assertions.fail("expected getBlockReader to fail the first time.");
           } catch (Throwable t) {
-            Assert.assertTrue("expected to see 'TCP reads were disabled " +
-                "for testing' in exception " + t, t.getMessage().contains(
-                "TCP reads were disabled for testing"));
+            Assertions.assertTrue(t.getMessage().contains(
+                "TCP reads were disabled for testing"), "expected to see " +
+                "'TCP reads were disabled for testing' in exception " + t);
           } finally {
             if (blockReader != null) blockReader.close(); // keep findbugs happy
           }
@@ -344,7 +342,7 @@
     }
     cluster.shutdown();
     sockDir.close();
-    Assert.assertFalse(testFailed.get());
+    Assertions.assertFalse(testFailed.get());
   }
 
   /**
@@ -388,7 +386,7 @@
             calculateFileContentsFromSeed(seed, testFileLen);
 
         try (FSDataInputStream in = dfs.open(testFile)) {
-          Assert.assertEquals(0,
+          Assertions.assertEquals(0,
               dfs.getClient().getClientContext().getShortCircuitCache(0)
                   .getReplicaInfoMapSize());
 
@@ -402,7 +400,7 @@
               .setMaxTotalSize(0);
           LOG.info("Unbuffering");
           in.unbuffer();
-          Assert.assertEquals(0,
+          Assertions.assertEquals(0,
               dfs.getClient().getClientContext().getShortCircuitCache(0)
                   .getReplicaInfoMapSize());
 
@@ -430,8 +428,8 @@
   private void validateReadResult(final DistributedFileSystem dfs,
       final byte[] expected, final byte[] actual,
       final int expectedScrRepMapSize) {
-    Assert.assertThat(expected, CoreMatchers.is(actual));
-    Assert.assertEquals(expectedScrRepMapSize,
+    assertThat(expected, CoreMatchers.is(actual));
+    Assertions.assertEquals(expectedScrRepMapSize,
         dfs.getClient().getClientContext().getShortCircuitCache(0)
             .getReplicaInfoMapSize());
   }
@@ -465,7 +463,7 @@
     byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    Assertions.assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache(0);
     final DatanodeInfo datanode = new DatanodeInfoBuilder()
@@ -475,11 +473,11 @@
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
-        Assert.assertEquals(1,  info.size());
+        Assertions.assertEquals(1,  info.size());
         PerDatanodeVisitorInfo vinfo = info.get(datanode);
-        Assert.assertTrue(vinfo.disabled);
-        Assert.assertEquals(0, vinfo.full.size());
-        Assert.assertEquals(0, vinfo.notFull.size());
+        Assertions.assertTrue(vinfo.disabled);
+        Assertions.assertEquals(0, vinfo.full.size());
+        Assertions.assertEquals(0, vinfo.notFull.size());
       }
     });
     cluster.shutdown();
@@ -514,10 +512,10 @@
     byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    Assertions.assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache(0);
-    Assert.assertEquals(null, cache.getDfsClientShmManager());
+    Assertions.assertEquals(null, cache.getDfsClientShmManager());
     cluster.shutdown();
     sockDir.close();
   }
@@ -546,11 +544,11 @@
     byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    Assertions.assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache(0);
     cache.close();
-    Assert.assertTrue(cache.getDfsClientShmManager().
+    Assertions.assertTrue(cache.getDfsClientShmManager().
         getDomainSocketWatcher().isClosed());
     cluster.shutdown();
     sockDir.close();
@@ -649,7 +647,7 @@
       thread.interrupt();
       sem.release();
     }
-    Assert.assertFalse(testFailed.get());
+    Assertions.assertFalse(testFailed.get());
 
     // We should be able to read from the file without
     // getting a ClosedChannelException.
@@ -663,10 +661,10 @@
     }
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(buf, expected));
+    Assertions.assertTrue(Arrays.equals(buf, expected));
 
     // Another ShortCircuitReplica object should have been created.
-    Assert.assertEquals(2, replicasCreated.get());
+    Assertions.assertEquals(2, replicasCreated.get());
 
     dfs.close();
     cluster.shutdown();
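Note: org.junit.Assume's assumeThat has no direct Jupiter equivalent; the
org.junit.jupiter.api.Assumptions class offers assumeTrue/assumeFalse, which skip
(rather than fail) a test when the precondition does not hold. A hypothetical sketch
of the domain-socket precondition used in these tests:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    import org.apache.hadoop.net.unix.DomainSocket;
    import org.junit.jupiter.api.Test;

    public class AssumptionExample {
      @Test
      public void testNeedsDomainSockets() {
        // Skips the test (instead of failing it) on platforms without native
        // domain socket support.
        assumeTrue(DomainSocket.getLoadingFailureReason() == null,
            "requires libhadoop domain socket support");
      }
    }
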
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
index 534243d..45c3a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-
 import java.io.EOFException;
 import java.io.File;
 import java.io.FileInputStream;
@@ -58,22 +56,22 @@
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestBlockReaderLocal {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     sockDir.close();
   }
@@ -82,7 +80,7 @@
       int off2, int len) {
     for (int i = 0; i < len; i++) {
       if (buf1[off1 + i] != buf2[off2 + i]) {
-        Assert.fail("arrays differ at byte " +  i + ". " +
+        Assertions.fail("arrays differ at byte " +  i + ". " +
           "The first array has " + (int)buf1[off1 + i] +
           ", but the second array has " + (int)buf2[off2 + i]);
       }
@@ -138,7 +136,7 @@
   public void runBlockReaderLocalTest(BlockReaderLocalTest test,
       boolean checksum, long readahead, int shortCircuitCachesNum)
           throws IOException {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     MiniDFSCluster cluster = null;
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
@@ -170,10 +168,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -221,8 +219,8 @@
         metaIn = null;
         test.doTest(blockReaderLocal, original, i * blockSize);
         // BlockReaderLocal should not alter the file position.
-        Assert.assertEquals(0, streams[0].getChannel().position());
-        Assert.assertEquals(0, streams[1].getChannel().position());
+        Assertions.assertEquals(0, streams[0].getChannel().position());
+        Assertions.assertEquals(0, streams[1].getChannel().position());
       }
       cluster.shutdown();
       cluster = null;
@@ -269,7 +267,7 @@
       reader.readFully(buf, 1537, 514);
       assertArrayRegionsEqual(original, 1537, buf, 1537, 514);
       // Readahead is always at least the size of one chunk in this test.
-      Assert.assertTrue(reader.getMaxReadaheadLength() >=
+      Assertions.assertTrue(reader.getMaxReadaheadLength() >=
           BlockReaderLocalTest.BYTES_PER_CHECKSUM);
     }
   }
@@ -489,7 +487,7 @@
       if (usingChecksums) {
         try {
           reader.readFully(buf, 0, 10);
-          Assert.fail("did not detect corruption");
+          Assertions.fail("did not detect corruption");
         } catch (IOException e) {
           // expected
         }
@@ -539,11 +537,11 @@
         reader.readFully(buf, 816, 900);
         if (usingChecksums) {
           // We should detect the corruption when using a checksum file.
-          Assert.fail("did not detect corruption");
+          Assertions.fail("did not detect corruption");
         }
       } catch (ChecksumException e) {
         if (!usingChecksums) {
-          Assert.fail("didn't expect to get ChecksumException: not " +
+          Assertions.fail("didn't expect to get ChecksumException: not " +
               "using checksums.");
         }
       }
@@ -640,7 +638,7 @@
     @Override
     public void doTest(BlockReaderLocal reader, byte original[])
         throws IOException {
-      Assert.assertTrue(!reader.getVerifyChecksum());
+      Assertions.assertTrue(!reader.getVerifyChecksum());
       ByteBuffer buf = ByteBuffer.wrap(new byte[TEST_LENGTH]);
       reader.skip(1);
       readFully(reader, buf, 1, 9);
@@ -663,15 +661,15 @@
     public void doTest(BlockReaderLocal reader, byte original[])
         throws IOException {
       byte emptyArr[] = new byte[0];
-      Assert.assertEquals(0, reader.read(emptyArr, 0, 0));
+      Assertions.assertEquals(0, reader.read(emptyArr, 0, 0));
       ByteBuffer emptyBuf = ByteBuffer.wrap(emptyArr);
-      Assert.assertEquals(0, reader.read(emptyBuf));
+      Assertions.assertEquals(0, reader.read(emptyBuf));
       reader.skip(1);
-      Assert.assertEquals(0, reader.read(emptyArr, 0, 0));
-      Assert.assertEquals(0, reader.read(emptyBuf));
+      Assertions.assertEquals(0, reader.read(emptyArr, 0, 0));
+      Assertions.assertEquals(0, reader.read(emptyBuf));
       reader.skip(BlockReaderLocalTest.TEST_LENGTH - 1);
-      Assert.assertEquals(-1, reader.read(emptyArr, 0, 0));
-      Assert.assertEquals(-1, reader.read(emptyBuf));
+      Assertions.assertEquals(-1, reader.read(emptyArr, 0, 0));
+      Assertions.assertEquals(-1, reader.read(emptyBuf));
     }
   }
 
@@ -743,7 +741,7 @@
   }
 
   private void testStatistics(boolean isShortCircuit) throws Exception {
-    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     HdfsConfiguration conf = new HdfsConfiguration();
     TemporarySocketDirectory sockDir = null;
     if (isShortCircuit) {
@@ -773,25 +771,25 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
       IOUtils.readFully(fsIn, original, 0,
           BlockReaderLocalTest.TEST_LENGTH);
       HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
-      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
+      Assertions.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
+      Assertions.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
           dfsIn.getReadStatistics().getTotalLocalBytesRead());
       if (isShortCircuit) {
-        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
+        Assertions.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
             dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
       } else {
-        Assert.assertEquals(0,
+        Assertions.assertEquals(0,
             dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
       }
       fsIn.close();
@@ -838,9 +836,9 @@
         IOUtils.readFully(in, buf, 0, length);
 
         ReadStatistics stats = in.getReadStatistics();
-        Assert.assertEquals(BlockType.CONTIGUOUS, stats.getBlockType());
-        Assert.assertEquals(length, stats.getTotalBytesRead());
-        Assert.assertEquals(length, stats.getTotalLocalBytesRead());
+        Assertions.assertEquals(BlockType.CONTIGUOUS, stats.getBlockType());
+        Assertions.assertEquals(length, stats.getTotalBytesRead());
+        Assertions.assertEquals(length, stats.getTotalLocalBytesRead());
       }
 
       Path ecFile = new Path(ecDir, "file2");
@@ -855,10 +853,10 @@
         IOUtils.readFully(in, buf, 0, length);
 
         ReadStatistics stats = in.getReadStatistics();
-        Assert.assertEquals(BlockType.STRIPED, stats.getBlockType());
-        Assert.assertEquals(length, stats.getTotalLocalBytesRead());
-        Assert.assertEquals(length, stats.getTotalBytesRead());
-        Assert.assertTrue(stats.getTotalEcDecodingTimeMillis() > 0);
+        Assertions.assertEquals(BlockType.STRIPED, stats.getBlockType());
+        Assertions.assertEquals(length, stats.getTotalLocalBytesRead());
+        Assertions.assertEquals(length, stats.getTotalBytesRead());
+        Assertions.assertTrue(stats.getTotalEcDecodingTimeMillis() > 0);
       }
     }
   }
@@ -878,7 +876,7 @@
       reader.readFully(buf, 1537, 514);
       assertArrayRegionsEqual(original, 1537 + shift, buf, 1537, 514);
       // Readahead is always at least the size of one chunk in this test.
-      Assert.assertTrue(reader.getMaxReadaheadLength() >=
+      Assertions.assertTrue(reader.getMaxReadaheadLength() >=
               BlockReaderLocalTest.BYTES_PER_CHECKSUM);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
index 285cdb6..7674544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -49,13 +49,13 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestBlockReaderLocalLegacy {
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws IOException {
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DomainSocket.disableBindPathValidation();
@@ -107,7 +107,7 @@
 
     ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
     int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
-    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
+    assertEquals(REPL_FACTOR, blockFilesCorrupted, "All replicas not corrupted");
 
     FSDataInputStream dis = cluster.getFileSystem().open(path);
     ByteBuffer buf = ByteBuffer.allocateDirect((int)FILE_LENGTH);
@@ -142,7 +142,7 @@
   public void testBothOldAndNewShortCircuitConfigured() throws Exception {
     final short REPL_FACTOR = 1;
     final int FILE_LENGTH = 512;
-    Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
+    Assumptions.assumeTrue(null == DomainSocket.getLoadingFailureReason());
     TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
     HdfsConfiguration conf = getConfiguration(socketDir);
     MiniDFSCluster cluster =
@@ -164,7 +164,7 @@
     byte buf[] = new byte[FILE_LENGTH];
     IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
     fis.close();
-    Assert.assertArrayEquals(orig, buf);
+    Assertions.assertArrayEquals(orig, buf);
     Arrays.equals(orig, buf);
     cluster.shutdown();
   }
@@ -203,7 +203,7 @@
 
       // test getBlockLocalPathInfo
       final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
-      Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
+      Assertions.assertEquals(originalGS, info.getBlock().getGenerationStamp());
     }
 
     { // append one byte
@@ -217,13 +217,13 @@
       final LocatedBlock lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations(path.toString(), 0, 1).get(0);
       final long newGS = lb.getBlock().getGenerationStamp();
-      Assert.assertTrue(newGS > originalGS);
+      Assertions.assertTrue(newGS > originalGS);
 
       // getBlockLocalPathInfo using the original block.
-      Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
+      Assertions.assertEquals(originalGS, originalBlock.getGenerationStamp());
       final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(
           originalBlock, token);
-      Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
+      Assertions.assertEquals(newGS, info.getBlock().getGenerationStamp());
     }
     cluster.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
index 78c9087..7329463 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
@@ -29,7 +29,8 @@
 import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import org.apache.hadoop.util.FakeTimer;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 import org.junit.Test;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -107,8 +108,8 @@
         SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME);
     double averageLatency = getDoubleGauge(
         SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
-    assertTrue("Average Latency of Short Circuit Reads lower than expected",
-        averageLatency >= SLOW_READ_DELAY);
+    assertTrue(averageLatency >= SLOW_READ_DELAY,
+        "Average Latency of Short Circuit Reads lower than expected");
   }
 
   @Test(timeout = 300_000)
@@ -165,8 +166,8 @@
     double averageLatency = getDoubleGauge(
         SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
 
-    assertTrue("Average Latency of Short Circuit Reads lower than expected",
-        averageLatency >= SLOW_READ_DELAY*2);
+    assertTrue(averageLatency >= SLOW_READ_DELAY * 2,
+        "Average Latency of Short Circuit Reads lower than expected");
   }
 
   @Test(timeout = 300_000)
@@ -220,7 +221,7 @@
     double averageLatency = getDoubleGauge(
         SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
 
-    assertTrue("Average Latency of Short Circuit Reads lower than expected",
-        averageLatency >= expectedAvgLatency);
+    assertTrue(averageLatency >= expectedAvgLatency,
+        "Average Latency of Short Circuit Reads lower than expected");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java
index 5638720..0be168e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.Random;
@@ -28,9 +28,9 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This tests BlockReaderRemote.
@@ -57,7 +57,7 @@
     return util.getBlockReader(block, 0, blockData.length);
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     util = new BlockReaderTestUtil(1, new HdfsConfiguration());
     blockData = getBlockData();
@@ -70,7 +70,7 @@
     reader = getBlockReader(blk);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     util.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java
index 54156f6..273be71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java
@@ -29,9 +29,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 public class TestClientBlockVerification {
@@ -44,7 +44,7 @@
   static {
     GenericTestUtils.setLogLevel(BlockReaderRemote.LOG, Level.TRACE);
   }
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws Exception {
     final int REPLICATION_FACTOR = 1;
     util = new BlockReaderTestUtil(REPLICATION_FACTOR);
@@ -118,7 +118,7 @@
   }
 
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws Exception {
     util.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
index 35c13e6..db9de02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
@@ -31,10 +31,10 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestHdfsCryptoStreams extends CryptoStreamsTestBase {
   private static MiniDFSCluster dfsCluster;
@@ -43,7 +43,7 @@
   private static Path path;
   private static Path file;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     Configuration conf = new HdfsConfiguration();
     dfsCluster = new MiniDFSCluster.Builder(conf).build();
@@ -52,14 +52,14 @@
     codec = CryptoCodec.getInstance(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     if (dfsCluster != null) {
       dfsCluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws IOException {
     ++pathCount;
@@ -70,7 +70,7 @@
     super.setUp();
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     fs.delete(path, true);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
index 1d0024d..3f57b76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Sets;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,10 +39,7 @@
 import java.util.HashSet;
 import java.util.Set;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 /**
@@ -59,7 +56,7 @@
   @Rule
   public Timeout testTimeout = new Timeout(30000);
 
-  @Before
+  @BeforeEach
   public void setupDatanodes() {
     final String[] racks = {
         "/l1/d1/r1", "/l1/d1/r1", "/l1/d1/r2", "/l1/d1/r2", "/l1/d1/r2",
@@ -635,7 +632,7 @@
     excluded.add(dns[1]);
     Node n = dfsCluster.chooseRandomWithStorageType("/default",
         "/default/rack1", excluded, StorageType.DISK);
-    assertNull("No node should have been selected.", n);
+    assertNull(n, "No node should have been selected.");
   }
 
   /**
@@ -665,6 +662,6 @@
     Node n = dfsCluster.chooseRandomWithStorageType(
         "/default/rack1/0.0.0.0:" + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
         null, excluded, StorageType.DISK);
-    assertNull("No node should have been selected.", n);
+    assertNull(n, "No node should have been selected.");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
index 77a059a..7a03f02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
@@ -24,10 +24,10 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,8 +37,8 @@
 import java.util.Random;
 import java.util.Set;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Performance test of the new DFSNetworkTopology chooseRandom.
@@ -49,7 +49,7 @@
  * tests without something reading the value. So disabled the tests to for now,
  * anyone interested in looking at the numbers can enable them.
  */
-@Ignore
+@Disabled
 public class TestDFSNetworkTopologyPerformance {
   public static final Logger LOG =
       LoggerFactory.getLogger(TestDFSNetworkTopologyPerformance.class);
@@ -83,7 +83,7 @@
   private long localEnd;
 
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     racks = new String[NODE_NUM];
     hosts = new String[NODE_NUM];
@@ -95,7 +95,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     cluster = NetworkTopology.getInstance(new Configuration());
     dfscluster = DFSNetworkTopology.getInstance(new Configuration());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
index c461e2c..82b6b35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
@@ -22,8 +22,8 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests to make sure all the protocol class public methods have
@@ -34,10 +34,10 @@
   public void checkAnnotations() {
     Method[] methods = NamenodeProtocols.class.getMethods();
     for (Method m : methods) {
-      Assert.assertTrue(
-          "Idempotent or AtMostOnce annotation is not present " + m,
-          m.isAnnotationPresent(Idempotent.class)
-              || m.isAnnotationPresent(AtMostOnce.class));
+      Assertions.assertTrue(
+          m.isAnnotationPresent(Idempotent.class)
+              || m.isAnnotationPresent(AtMostOnce.class),
+          "Idempotent or AtMostOnce annotation is not present " + m);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
index 17b3939..133c52c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
@@ -18,11 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -53,12 +49,12 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 import org.apache.hadoop.thirdparty.protobuf.ServiceException;
+import org.junit.jupiter.api.Test;
 
 public class TestBlockListAsLongs {
   static Block b1 = new Block(1, 11, 111);
@@ -177,12 +173,12 @@
       assertNotNull(replica);
       Replica expected = reportReplicas.remove(replica.getBlockId());
       assertNotNull(expected);
-      assertEquals("wrong bytes",
-          expected.getNumBytes(), replica.getNumBytes());
-      assertEquals("wrong genstamp",
-          expected.getGenerationStamp(), replica.getGenerationStamp());
-      assertEquals("wrong replica state",
-          expected.getState(), replica.getState());
+      assertEquals(expected.getNumBytes(), replica.getNumBytes(),
+          "wrong bytes");
+      assertEquals(expected.getGenerationStamp(),
+          replica.getGenerationStamp(), "wrong genstamp");
+      assertEquals(expected.getState(), replica.getState(),
+          "wrong replica state");
     }
     assertTrue(reportReplicas.isEmpty());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
index f074897..e73cc97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.*;
 
 import java.util.ArrayList;
@@ -132,19 +130,19 @@
         NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME,
         NameNodeLayoutVersion.Feature.NVDIMM_SUPPORT);
     for (LayoutFeature f : compatibleFeatures) {
-      assertEquals(String.format("Expected minimum compatible layout version " +
-          "%d for feature %s.", baseLV, f), baseLV,
-          f.getInfo().getMinimumCompatibleLayoutVersion());
+      assertEquals(baseLV, f.getInfo().getMinimumCompatibleLayoutVersion(),
+          String.format("Expected minimum compatible layout version " +
+              "%d for feature %s.", baseLV, f));
     }
     List<LayoutFeature> features = new ArrayList<>();
     features.addAll(EnumSet.allOf(LayoutVersion.Feature.class));
     features.addAll(EnumSet.allOf(NameNodeLayoutVersion.Feature.class));
     for (LayoutFeature f : features) {
       if (!compatibleFeatures.contains(f)) {
-        assertEquals(String.format("Expected feature %s to have minimum " +
-            "compatible layout version set to itself.", f),
-            f.getInfo().getLayoutVersion(),
-            f.getInfo().getMinimumCompatibleLayoutVersion());
+        assertEquals(f.getInfo().getLayoutVersion(),
+            f.getInfo().getMinimumCompatibleLayoutVersion(),
+            String.format("Expected feature %s to have minimum " +
+                "compatible layout version set to itself.", f));
       }
     }
   }
@@ -161,10 +159,10 @@
     LayoutFeature prevF = null;
     for (LayoutFeature f : EnumSet.allOf(NameNodeLayoutVersion.Feature.class)) {
       if (prevF != null) {
-        assertTrue(String.format("Features %s and %s not listed in order of " +
-            "minimum compatible layout version.", prevF, f),
-            f.getInfo().getMinimumCompatibleLayoutVersion() <=
-            prevF.getInfo().getMinimumCompatibleLayoutVersion());
+        assertTrue(f.getInfo().getMinimumCompatibleLayoutVersion() <=
+            prevF.getInfo().getMinimumCompatibleLayoutVersion(),
+            String.format("Features %s and %s not listed in order of " +
+                "minimum compatible layout version.", prevF, f));
       } else {
         prevF = f;
       }
@@ -201,10 +199,10 @@
         .getLayoutVersion();
     int actualMinCompatLV = LayoutVersion.getMinimumCompatibleLayoutVersion(
         NameNodeLayoutVersion.Feature.values());
-    assertEquals("The minimum compatible layout version has changed.  " +
-        "Downgrade to prior versions is no longer possible.  Please either " +
-        "restore compatibility, or if the incompatibility is intentional, " +
-        "then update this assertion.", expectedMinCompatLV, actualMinCompatLV);
+    assertEquals(expectedMinCompatLV, actualMinCompatLV,
+        "The minimum compatible layout version has changed.  Downgrade to prior " +
+        "versions is no longer possible.  Please either restore compatibility, " +
+        "or if the incompatibility is intentional, then update this assertion.");
   }
 
   /**
@@ -218,9 +216,9 @@
     SortedSet<LayoutFeature> ancestorSet = NameNodeLayoutVersion.getFeatures(ancestorLV);
     assertNotNull(ancestorSet);
     for (LayoutFeature  feature : ancestorSet) {
-      assertTrue("LV " + lv + " does nto support " + feature
-          + " supported by the ancestor LV " + info.getAncestorLayoutVersion(),
-          NameNodeLayoutVersion.supports(feature, lv));
+      assertTrue(NameNodeLayoutVersion.supports(feature, lv),
+          "LV " + lv + " does not support " + feature +
+          " supported by the ancestor LV " + info.getAncestorLayoutVersion());
     }
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
index 33b5bd9..4eaa8d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.junit.Test;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestLocatedBlock {
   public static final Logger LOG =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java
index f627f00..123747e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java
@@ -25,12 +25,13 @@
 
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.Test;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class TestPacketReceiver {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
index 63ce45b..6a7b03a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
@@ -30,7 +30,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.util.Properties;
@@ -44,8 +44,8 @@
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 public abstract class SaslDataTransferTestCase {
 
@@ -75,7 +75,7 @@
     return hdfsKeytab;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void initKdc() throws Exception {
     baseDir = GenericTestUtils
         .getTestDir(SaslDataTransferTestCase.class.getSimpleName());
@@ -100,7 +100,7 @@
     spnegoPrincipal = "HTTP/localhost@" + kdc.getRealm();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownKdc() throws Exception {
     if (kdc != null) {
       kdc.stop();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestBlackListBasedTrustedChannelResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestBlackListBasedTrustedChannelResolver.java
index 75e1a4d..81a9b4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestBlackListBasedTrustedChannelResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestBlackListBasedTrustedChannelResolver.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -29,9 +27,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlackListBasedTrustedChannelResolver;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test class for  {@link BlackListBasedTrustedChannelResolver}.
@@ -43,7 +41,7 @@
   private final static String BLACK_LISTED = "127.0.0.1\n216.58.216.174\n";
   private BlackListBasedTrustedChannelResolver resolver;
 
-  @Before
+  @BeforeEach
   public void setup() {
     blacklistFile = new File(GenericTestUtils.getTestDir(), FILE_NAME);
     resolver
@@ -55,7 +53,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void cleanUp() {
     FileUtils.deleteQuietly(blacklistFile);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index 3dd0b7e..7547013 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -20,9 +20,8 @@
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -55,10 +54,10 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.After;
-import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
@@ -78,7 +77,7 @@
   @Rule
   public Timeout timeout = new Timeout(60000);
 
-  @After
+  @AfterEach
   public void shutdown() {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
@@ -142,7 +141,7 @@
         LoggerFactory.getLogger(DataNode.class));
     try {
       doTest(clientConf);
-      Assert.fail("Should fail if SASL data transfer protection is not " +
+      Assertions.fail("Should fail if SASL data transfer protection is not " +
           "configured or not supported in client");
     } catch (IOException e) {
       GenericTestUtils.assertMatches(e.getMessage(), 
@@ -199,7 +198,7 @@
   private void doTest(HdfsConfiguration conf) throws IOException {
     fs = FileSystem.get(cluster.getURI(), conf);
     FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
-    assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE),
+    Assertions.assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE),
       DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
     BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0,
       Long.MAX_VALUE);
@@ -252,7 +251,7 @@
       Peer peer = DFSUtilClient.peerFromSocketAndKey(saslClient, socket,
           dataEncKeyFactory, new Token(), fakeDatanodeId, 1);
       peer.close();
-      Assert.fail("Expected DFSClient#peerFromSocketAndKey to time out.");
+      Assertions.fail("Expected DFSClient#peerFromSocketAndKey to time out.");
     } catch (SocketTimeoutException e) {
       GenericTestUtils.assertExceptionContains("Read timed out", e);
     } finally {
@@ -303,7 +302,7 @@
       saslClient.socketSend(socket, null, null, dataEncryptionKeyFactory,
           null, null);
 
-      Assert.fail("Expected IOException from "
+      Assertions.fail("Expected IOException from "
           + "SaslDataTransferClient#checkTrustAndSend");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Encryption enabled", e);
@@ -352,7 +351,7 @@
       saslClient.socketSend(socket, null, null, dataEncryptionKeyFactory,
           null, null);
 
-      Assert.fail("Expected IOException from "
+      Assertions.fail("Expected IOException from "
           + "SaslDataTransferClient#checkTrustAndSend");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Encryption enabled", e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index e9bcef5..8d61505 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -24,11 +24,8 @@
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -113,9 +110,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
 import org.junit.Test;
-
+import org.junit.jupiter.api.Assertions;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
@@ -722,7 +718,7 @@
     AclEntry[] actual = Lists.newArrayList(
         PBHelperClient.convertAclEntry(PBHelperClient.convertAclEntryProto(Lists
             .newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(expected, actual);
+    Assertions.assertArrayEquals(expected, actual);
   }
 
   @Test
@@ -732,7 +728,7 @@
         .setType(AclEntryType.OTHER).build();
     AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
         .build();
-    Assert.assertEquals(s, PBHelperClient.convert(PBHelperClient.convert(s)));
+    Assertions.assertEquals(s, PBHelperClient.convert(PBHelperClient.convert(s)));
   }
   
   @Test
@@ -809,17 +805,17 @@
         ImmutableMap.of("peer1", 0.0, "peer2", 1.0, "peer3", 2.0));
     SlowPeerReports slowPeersConverted1 = PBHelper.convertSlowPeerInfo(
         PBHelper.convertSlowPeerInfo(slowPeers));
-    assertTrue(
-        "Expected map:" + slowPeers + ", got map:" +
-            slowPeersConverted1.getSlowPeers(),
-        slowPeersConverted1.equals(slowPeers));
+    assertTrue(
+        slowPeersConverted1.equals(slowPeers),
+        "Expected map:" + slowPeers + ", got map:" +
+            slowPeersConverted1.getSlowPeers());
 
     // Test with an empty map.
     SlowPeerReports slowPeersConverted2 = PBHelper.convertSlowPeerInfo(
         PBHelper.convertSlowPeerInfo(SlowPeerReports.EMPTY_REPORT));
-    assertTrue(
-        "Expected empty map:" + ", got map:" + slowPeersConverted2,
-        slowPeersConverted2.equals(SlowPeerReports.EMPTY_REPORT));
+    assertTrue(
+        slowPeersConverted2.equals(SlowPeerReports.EMPTY_REPORT),
+        "Expected empty map:" + ", got map:" + slowPeersConverted2);
   }
 
   @Test
@@ -835,17 +831,17 @@
                 SlowDiskReports.DiskOp.WRITE, 1.3)));
     SlowDiskReports slowDisksConverted1 = PBHelper.convertSlowDiskInfo(
         PBHelper.convertSlowDiskInfo(slowDisks));
-    assertTrue(
-        "Expected map:" + slowDisks + ", got map:" +
-            slowDisksConverted1.getSlowDisks(),
-        slowDisksConverted1.equals(slowDisks));
+    assertTrue(
+        slowDisksConverted1.equals(slowDisks),
+        "Expected map:" + slowDisks + ", got map:" +
+            slowDisksConverted1.getSlowDisks());
 
     // Test with an empty map
     SlowDiskReports slowDisksConverted2 = PBHelper.convertSlowDiskInfo(
         PBHelper.convertSlowDiskInfo(SlowDiskReports.EMPTY_REPORT));
-    assertTrue(
-        "Expected empty map:" + ", got map:" + slowDisksConverted2,
-        slowDisksConverted2.equals(SlowDiskReports.EMPTY_REPORT));
+    assertTrue(
+        slowDisksConverted2.equals(SlowDiskReports.EMPTY_REPORT),
+        "Expected empty map:" + ", got map:" + slowDisksConverted2);
   }
 
   private void assertBlockECRecoveryInfoEquals(
@@ -918,12 +914,12 @@
         DataChecksum.Type.valueOf(DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT).id));
     HdfsProtos.FsServerDefaultsProto proto = b.build();
 
-    assertFalse("KeyProvider uri is not supported",
-        proto.hasKeyProviderUri());
+    assertFalse(proto.hasKeyProviderUri(),
+        "KeyProvider uri is not supported");
     FsServerDefaults fsServerDefaults = PBHelperClient.convert(proto);
-    Assert.assertNotNull("FsServerDefaults is null", fsServerDefaults);
-    Assert.assertNull("KeyProviderUri should be null",
-        fsServerDefaults.getKeyProviderUri());
+    Assertions.assertNotNull(fsServerDefaults, "FsServerDefaults is null");
+    Assertions.assertNull(fsServerDefaults.getKeyProviderUri(),
+        "KeyProviderUri should be null");
   }
 
   @Test
@@ -935,15 +931,15 @@
           new AddErasureCodingPolicyResponse(policy);
       HdfsProtos.AddErasureCodingPolicyResponseProto proto = PBHelperClient
           .convertAddErasureCodingPolicyResponse(response);
-      // Optional fields should not be set.
-      assertFalse("Unnecessary field is set.", proto.hasErrorMsg());
+      // Optional fields should not be set.
+      assertFalse(proto.hasErrorMsg(), "Unnecessary field is set.");
       // Convert proto back to an object and check for equality.
       AddErasureCodingPolicyResponse convertedResponse = PBHelperClient
           .convertAddErasureCodingPolicyResponse(proto);
-      assertEquals("Converted policy not equal", response.getPolicy(),
-          convertedResponse.getPolicy());
-      assertEquals("Converted policy not equal", response.isSucceed(),
-          convertedResponse.isSucceed());
+      assertEquals(response.getPolicy(),
+          convertedResponse.getPolicy(), "Converted policy not equal");
+      assertEquals(response.isSucceed(),
+          convertedResponse.isSucceed(), "Converted policy not equal");
     }
 
     ErasureCodingPolicy policy = SystemErasureCodingPolicies
@@ -955,10 +951,10 @@
     // Convert proto back to an object and check for equality.
     AddErasureCodingPolicyResponse convertedResponse = PBHelperClient
         .convertAddErasureCodingPolicyResponse(proto);
-    assertEquals("Converted policy not equal", response.getPolicy(),
-        convertedResponse.getPolicy());
-    assertEquals("Converted policy not equal", response.getErrorMsg(),
-        convertedResponse.getErrorMsg());
+    assertEquals(response.getPolicy(),
+        convertedResponse.getPolicy(), "Converted policy not equal");
+    assertEquals(response.getErrorMsg(),
+        convertedResponse.getErrorMsg(), "Converted policy not equal");
   }
 
   @Test
@@ -968,14 +964,14 @@
         SystemErasureCodingPolicies.getPolicies()) {
       HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
           .convertErasureCodingPolicy(policy);
-      // Optional fields should not be set.
-      assertFalse("Unnecessary field is set.", proto.hasName());
-      assertFalse("Unnecessary field is set.", proto.hasSchema());
-      assertFalse("Unnecessary field is set.", proto.hasCellSize());
+      // Optional fields should not be set.
+      assertFalse(proto.hasName(), "Unnecessary field is set.");
+      assertFalse(proto.hasSchema(), "Unnecessary field is set.");
+      assertFalse(proto.hasCellSize(), "Unnecessary field is set.");
       // Convert proto back to an object and check for equality.
       ErasureCodingPolicy convertedPolicy = PBHelperClient
           .convertErasureCodingPolicy(proto);
-      assertEquals("Converted policy not equal", policy, convertedPolicy);
+      assertEquals(policy, convertedPolicy, "Converted policy not equal");
     }
     // Check conversion of a non-built-in policy.
     ECSchema newSchema = new ECSchema("testcodec", 3, 2);
@@ -983,14 +979,14 @@
         new ErasureCodingPolicy(newSchema, 128 * 1024);
     HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
         .convertErasureCodingPolicy(newPolicy);
-    // Optional fields should be set.
-    assertTrue("Optional field not set", proto.hasName());
-    assertTrue("Optional field not set", proto.hasSchema());
-    assertTrue("Optional field not set", proto.hasCellSize());
+    // Optional fields should be set.
+    assertTrue(proto.hasName(), "Optional field not set");
+    assertTrue(proto.hasSchema(), "Optional field not set");
+    assertTrue(proto.hasCellSize(), "Optional field not set");
     ErasureCodingPolicy convertedPolicy = PBHelperClient
         .convertErasureCodingPolicy(proto);
-    // Converted policy should be equal.
-    assertEquals("Converted policy not equal", newPolicy, convertedPolicy);
+    // Converted policy should be equal.
+    assertEquals(newPolicy, convertedPolicy, "Converted policy not equal");
   }
 
   @Test(expected = UninitializedMessageException.class)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index d0bbd44..34f38e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.qjournal;
 
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
index fa0e6ef..1c79b9de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.Closeable;
 import java.io.File;
@@ -128,9 +125,9 @@
       
       FSEditLogOp op = stream.readOp();
       while (op == null) {
-        assertTrue("Expected to find txid " + expected + ", " +
-            "but no more streams available to read from",
-            iter.hasNext());
+        assertTrue(iter.hasNext(),
+            "Expected to find txid " + expected + ", " +
+            "but no more streams available to read from");
         stream = iter.next();
         op = stream.readOp();
       }
@@ -140,8 +137,8 @@
     }
     
     assertNull(stream.readOp());
-    assertFalse("Expected no more txns after " + lastTxnId +
-        " but more streams are available", iter.hasNext());
+    assertFalse(iter.hasNext(), "Expected no more txns after " + lastTxnId +
+        " but more streams are available");
   }
   
 
@@ -154,8 +151,8 @@
         count++;
       }
     }
-    assertTrue("File " + fname + " should exist in a quorum of dirs",
-        count >= cluster.getQuorumSize());
+    assertTrue(count >= cluster.getQuorumSize(),
+        "File " + fname + " should exist in a quorum of dirs");
   }
 
   public static long recoverAndReturnLastTxn(QuorumJournalManager qjm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
index cace7c9..dae4588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 public class TestMiniJournalCluster {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
index 4483667..2884c21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,9 +35,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 
 public class TestNNWithQJM {
@@ -45,18 +46,18 @@
   private final Path TEST_PATH = new Path("/test-dir");
   private final Path TEST_PATH_2 = new Path("/test-dir-2");
 
-  @Before
+  @BeforeEach
   public void resetSystemExit() {
     ExitUtil.resetFirstExitException();
   }
   
-  @Before
+  @BeforeEach
   public void startJNs() throws Exception {
     mjc = new MiniJournalCluster.Builder(conf).build();
     mjc.waitActive();
   }
   
-  @After
+  @AfterEach
   public void stopJNs() throws Exception {
     if (mjc != null) {
       mjc.shutdown();
@@ -200,7 +201,7 @@
     } catch (ExitException ee) {
       GenericTestUtils.assertExceptionContains(
           "Unable to start log segment 1: too few journals", ee);
-      assertTrue("Didn't terminate properly ", ExitUtil.terminateCalled());
+      assertTrue(ExitUtil.terminateCalled(), "Didn't terminate properly ");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
index f7c0e2d..63909d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.*;
-
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
@@ -58,12 +57,12 @@
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 public class TestSecureNNWithQJM {
@@ -86,7 +85,7 @@
   @Rule
   public Timeout timeout = new Timeout(180000);
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     baseDir =
         GenericTestUtils.getTestDir(TestSecureNNWithQJM.class.getSimpleName());
@@ -101,8 +100,8 @@
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
       baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-      UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     String userName = UserGroupInformation.getLoginUser().getShortUserName();
     File keytabFile = new File(baseDir, userName + ".keytab");
@@ -147,7 +146,7 @@
         KeyStoreTestUtil.getServerSSLConfigFileName());
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -157,12 +156,12 @@
     UserGroupInformation.reset();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration(baseConf);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
index df2f359..f2ee6a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -31,13 +32,13 @@
 import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
+import org.junit.jupiter.api.Test;
 
 
 public class TestEpochsAreUnique {
@@ -96,8 +97,8 @@
           }
         }
         LOG.info("Created epoch " + newEpoch);
-        assertTrue("New epoch " + newEpoch + " should be greater than previous " +
-            prevEpoch, newEpoch > prevEpoch);
+        assertTrue(newEpoch > prevEpoch,
+            "New epoch " + newEpoch + " should be greater than previous " + prevEpoch);
         prevEpoch = newEpoch;
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
index f2f4642..6559f46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -36,8 +36,8 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.util.function.Supplier;
@@ -61,7 +61,7 @@
   private static final int LIMIT_QUEUE_SIZE_BYTES =
       LIMIT_QUEUE_SIZE_MB * 1024 * 1024;
   
-  @Before
+  @BeforeEach
   public void setupMock() {
     conf.setInt(DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY,
         LIMIT_QUEUE_SIZE_MB);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
index 6cdbe2d..0ecb494 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
@@ -20,8 +20,8 @@
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.JID;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeSegment;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -53,7 +53,7 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Sets;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -260,9 +260,9 @@
             checkException(t);
             continue;
           }
-          assertTrue("Recovered only up to txnid " + recovered +
-              " but had gotten an ack for " + lastAcked,
-              recovered >= lastAcked);
+          assertTrue(recovered >= lastAcked,
+              "Recovered only up to txnid " + recovered +
+              " but had gotten an ack for " + lastAcked);
           
           txid = recovered + 1;
           
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
index 31b452e..48516b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.util.Map;
 import java.util.TreeMap;
@@ -72,8 +73,8 @@
         "f1", SettableFuture.<String>create());
 
     QuorumCall<String, String> q = QuorumCall.create(futures);
-    assertEquals("The number of quorum calls for which a response has been"
-            + " received should be 0", 0, q.countResponses());
+    assertEquals(0, q.countResponses(),
+        "The number of quorum calls for which a response has been received should be 0");
 
     try {
       q.waitFor(0, 1, 100, 10, "test");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index c4760a0..aff7153 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -23,11 +23,7 @@
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeSegment;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeTxns;
 import static org.apache.hadoop.hdfs.qjournal.client.TestQuorumJournalManagerUnit.futureThrows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.Closeable;
 import java.io.File;
@@ -61,10 +57,10 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 import org.mockito.stubbing.Stubber;
@@ -93,7 +89,7 @@
   @Rule
   public TestName name = new TestName();
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new Configuration();
     if (!name.getMethodName().equals("testSelectThreadCounts")) {
@@ -119,7 +115,7 @@
     assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException, InterruptedException,
       TimeoutException {
     IOUtils.cleanupWithLogger(LOG, toClose.toArray(new Closeable[0]));
@@ -1063,10 +1059,10 @@
         "Logger channel (from parallel executor) to " + ipcAddr;
     long num = Thread.getAllStackTraces().keySet().stream()
         .filter((t) -> t.getName().contains(expectedName)).count();
-    // The number of threads for the stopped jn shouldn't be more than the
-    // configured value.
-    assertTrue("Number of threads are : " + num,
-        num <= DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_DEFAULT);
+    // The number of threads for the stopped jn shouldn't be more than the
+    // configured value.
+    assertTrue(num <= DFSConfigKeys.DFS_QJOURNAL_PARALLEL_READ_NUM_THREADS_DEFAULT,
+        "Number of threads are : " + num);
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
index c75f6e8..eeb17f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -33,7 +33,6 @@
 
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -53,8 +52,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.stubbing.Stubber;
 
@@ -84,7 +84,7 @@
   private List<AsyncLogger> spyLoggers;
   private QuorumJournalManager qjm;
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     spyLoggers = ImmutableList.of(
         mockLogger(),
@@ -184,7 +184,7 @@
     QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1,
         NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     String report = os.generateReport();
-    Assert.assertFalse("Report should be plain text", report.contains("<"));
+    Assertions.assertFalse(report.contains("<"), "Report should be plain text");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java
index dc16b66..29063a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
-import org.junit.Test;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.qjournal.client.SegmentRecoveryComparator.INSTANCE;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
index 6f0eece..ce97f7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
@@ -17,11 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes;
 import java.io.ByteArrayOutputStream;
@@ -50,11 +46,11 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 public class TestJournal {
@@ -75,7 +71,7 @@
   private Journal journal;
 
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     FileUtil.fullyDelete(TEST_LOG_DIR);
     conf = new Configuration();
@@ -86,13 +82,13 @@
     journal.format(FAKE_NSINFO, false);
   }
   
-  @After
+  @AfterEach
   public void verifyNoStorageErrors() throws Exception{
     Mockito.verify(mockErrorReporter, Mockito.never())
       .reportErrorOnFile(Mockito.<File>any());
   }
   
-  @After
+  @AfterEach
   public void cleanup() {
     IOUtils.closeStream(journal);
   }
@@ -115,15 +111,15 @@
     // verify the in-progress editlog segment
     SegmentStateProto segmentState = journal.getSegmentInfo(1);
     assertTrue(segmentState.getIsInProgress());
-    Assert.assertEquals(numTxns, segmentState.getEndTxId());
-    Assert.assertEquals(1, segmentState.getStartTxId());
+    Assertions.assertEquals(numTxns, segmentState.getEndTxId());
+    Assertions.assertEquals(1, segmentState.getStartTxId());
     
     // finalize the segment and verify it again
     journal.finalizeLogSegment(makeRI(3), 1, numTxns);
     segmentState = journal.getSegmentInfo(1);
     assertFalse(segmentState.getIsInProgress());
-    Assert.assertEquals(numTxns, segmentState.getEndTxId());
-    Assert.assertEquals(1, segmentState.getStartTxId());
+    Assertions.assertEquals(numTxns, segmentState.getEndTxId());
+    Assertions.assertEquals(1, segmentState.getStartTxId());
   }
 
   /**
@@ -287,7 +283,7 @@
   
   @Test (timeout = 10000)
   public void testJournalLocking() throws Exception {
-    Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
+    Assumptions.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
     StorageDirectory sd = journal.getStorage().getStorageDir(0);
     File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
     
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 6e117b7..c142b49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -41,12 +41,13 @@
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StopWatch;
-import org.junit.After;
-import org.junit.Assert;
-import static org.junit.Assert.*;
-import org.junit.Before;
+import static org.junit.jupiter.api.Assertions.*;
+
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 
@@ -78,7 +79,7 @@
     DefaultMetricsSystem.setMiniClusterMode(true);
   }
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
         File.separator + "TestJournalNode");
@@ -188,7 +189,7 @@
         "qjournal://journalnode0:9900;journalnode1:9901/test-journalid-ns2");
   }
   
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     jn.stop(0);
   }
@@ -316,9 +317,9 @@
     
     // Check default servlets.
     String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
-    assertTrue("Bad contents: " + pageContents,
-        pageContents.contains(
-            "Hadoop:service=JournalNode,name=JvmMetrics"));
+    assertTrue(pageContents.contains(
+        "Hadoop:service=JournalNode,name=JvmMetrics"),
+        "Bad contents: " + pageContents);
 
     // Create some edits on server side
     byte[] EDITS_DATA = QJMTestUtil.createTxnData(1, 3);
@@ -509,17 +510,17 @@
     //JournalSyncer will not be started, as journalsync is not enabled
     conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, false);
     jn.getOrCreateJournal(journalId);
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
     //Trying by passing nameserviceId still journalnodesyncer should not start
     // IstriedJournalSyncerStartWithnsId should also be false
     jn.getOrCreateJournal(journalId, "mycluster");
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
   }
@@ -530,9 +531,9 @@
     //JournalSyncer will not be started,
     // as shared edits hostnames are not resolved
     jn.getOrCreateJournal(journalId);
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
     //Trying by passing nameserviceId, now
@@ -540,9 +541,9 @@
     // but journalnode syncer will not be started,
     // as hostnames are not resolved
     jn.getOrCreateJournal(journalId, "mycluster");
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
   }
@@ -553,9 +554,9 @@
     //JournalSyncer will not be started,
     // as shared edits hostnames are not resolved
     jn.getOrCreateJournal(journalId);
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
     //Trying by passing nameserviceId and resolve hostnames
@@ -563,9 +564,9 @@
     // and also journalnode syncer will also be started
     setupStaticHostResolution(2, "jn");
     jn.getOrCreateJournal(journalId, "mycluster");
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
   }
@@ -578,9 +579,9 @@
     // but configured shared edits dir is appended with nameserviceId
     setupStaticHostResolution(2, "journalnode");
     jn.getOrCreateJournal(journalId);
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
     //Trying by passing nameserviceId and resolve hostnames
@@ -588,9 +589,9 @@
     // and also journalnode syncer will also be started
 
     jn.getOrCreateJournal(journalId, "ns1");
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
   }
 
@@ -602,9 +603,9 @@
     // namenodeId
     setupStaticHostResolution(2, "journalnode");
     jn.getOrCreateJournal(journalId);
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
     //Trying by passing nameserviceId and resolve hostnames
@@ -612,9 +613,9 @@
     // and also journalnode syncer will also be started
 
     jn.getOrCreateJournal(journalId, "ns1");
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
   }
 
@@ -627,9 +628,9 @@
     // namenodeId
     setupStaticHostResolution(2, "journalnode");
     jn.getOrCreateJournal(journalId);
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
 
     //Trying by passing nameserviceId and resolve hostnames
@@ -638,9 +639,9 @@
     // as for each nnId, different shared Edits dir value is configured
 
     jn.getOrCreateJournal(journalId, "ns1");
-    Assert.assertEquals(false,
+    Assertions.assertEquals(false,
         jn.getJournalSyncerStatus(journalId));
-    Assert.assertEquals(true,
+    Assertions.assertEquals(true,
         jn.getJournal(journalId).getTriedJournalSyncerStartedwithnsId());
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
index e571fbc..2cda514 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -32,9 +30,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 
 /**
@@ -48,7 +46,7 @@
   private MiniJournalCluster jCluster;
   private JournalNode jn;
   
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     // start 1 journal node
     jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
@@ -57,7 +55,7 @@
     jn = jCluster.getJournalNode(0);
   }
   
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (jCluster != null) {
       jCluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
index 9d5af1c..d2824d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
@@ -23,16 +23,14 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.hamcrest.core.IsNot.not;
 
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.http.HttpConfig;
@@ -44,7 +42,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
@@ -67,12 +66,12 @@
   private MiniJournalCluster jCluster;
   private JournalNode jn;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     conf = new HdfsConfiguration();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (jCluster != null) {
       jCluster.shutdown();
@@ -122,8 +121,8 @@
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     String address = jn.getHttpAddress().toString();
-    assertFalse("HTTP Bind address not expected to be wildcard by default.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertFalse(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address not expected to be wildcard by default.");
 
     LOG.info("Testing with " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
 
@@ -136,8 +135,8 @@
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     address = jn.getHttpAddress().toString();
-    assertTrue("HTTP Bind address " + address + " is not wildcard.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertTrue(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address " + address + " is not wildcard.");
   }
 
   private static final String BASEDIR = System.getProperty("test.build.dir",
@@ -180,8 +179,8 @@
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     String address = jn.getHttpsAddress().toString();
-    assertFalse("HTTP Bind address not expected to be wildcard by default.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertFalse(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address not expected to be wildcard by default.");
 
     LOG.info("Testing behavior with " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
 
@@ -194,7 +193,7 @@
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     address = jn.getHttpsAddress().toString();
-    assertTrue("HTTP Bind address " + address + " is not wildcard.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertTrue(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address " + address + " is not wildcard.");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
index bc4cf3a..135fa47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
@@ -36,11 +36,11 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.TestName;
 
 import java.io.File;
@@ -65,7 +65,7 @@
   @Rule
   public TestName testName = new TestName();
 
-  @Before
+  @BeforeEach
   public void setUpMiniCluster() throws IOException {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, true);
@@ -87,7 +87,7 @@
     namesystem = dfsCluster.getNamesystem(0);
   }
 
-  @After
+  @AfterEach
   public void shutDownMiniCluster() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
@@ -99,7 +99,7 @@
 
     //As by default 3 journal nodes are started;
     for(int i=0; i<3; i++) {
-      Assert.assertEquals(true,
+      Assertions.assertEquals(true,
           jCluster.getJournalNode(i).getJournalSyncerStatus("ns1"));
     }
 
@@ -309,9 +309,9 @@
     // JournalNodeSyncer alone (as the edit log queueing has been disabled)
     long numEditLogsSynced = jCluster.getJournalNode(0).getOrCreateJournal(jid)
         .getMetrics().getNumEditLogsSynced().value();
-    Assert.assertTrue("Edit logs downloaded outside syncer. Expected 8 or " +
-            "more downloads, got " + numEditLogsSynced + " downloads instead",
-        numEditLogsSynced >= 8);
+    Assertions.assertTrue(numEditLogsSynced >= 8,
+        "Edit logs downloaded outside syncer. Expected 8 or " +
+            "more downloads, got " + numEditLogsSynced + " downloads instead");
   }
 
   // Test JournalNode Sync when a JN is formatted while NN is actively writing
@@ -382,13 +382,13 @@
           HdfsConstants.RollingUpgradeAction.PREPARE);
 
     //query rolling upgrade
-    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+    Assertions.assertEquals(info, dfsActive.rollingUpgrade(
         HdfsConstants.RollingUpgradeAction.QUERY));
 
     // Restart the Standby NN with rollingUpgrade option
     dfsCluster.restartNameNode(standbyNNindex, true,
         "-rollingUpgrade", "started");
-    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+    Assertions.assertEquals(info, dfsActive.rollingUpgrade(
         HdfsConstants.RollingUpgradeAction.QUERY));
 
     // Do some edits and delete some edit logs
@@ -410,13 +410,13 @@
     standbyNNindex=((activeNNindex+1)%2);
     dfsActive = dfsCluster.getFileSystem(activeNNindex);
 
-    Assert.assertTrue(dfsCluster.getNameNode(activeNNindex).isActiveState());
-    Assert.assertFalse(dfsCluster.getNameNode(standbyNNindex).isActiveState());
+    Assertions.assertTrue(dfsCluster.getNameNode(activeNNindex).isActiveState());
+    Assertions.assertFalse(dfsCluster.getNameNode(standbyNNindex).isActiveState());
 
     // Restart the current standby NN (previously active)
     dfsCluster.restartNameNode(standbyNNindex, true,
         "-rollingUpgrade", "started");
-    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+    Assertions.assertEquals(info, dfsActive.rollingUpgrade(
         HdfsConstants.RollingUpgradeAction.QUERY));
     dfsCluster.waitActive();
 
@@ -429,12 +429,12 @@
     //finalize rolling upgrade
     final RollingUpgradeInfo finalize = dfsActive.rollingUpgrade(
         HdfsConstants.RollingUpgradeAction.FINALIZE);
-    Assert.assertTrue(finalize.isFinalized());
+    Assertions.assertTrue(finalize.isFinalized());
 
     // Check the missing edit logs exist after finalizing rolling upgrade
     for (File editLog : missingLogs) {
-      Assert.assertTrue("Edit log missing after finalizing rolling upgrade",
-          editLog.exists());
+      Assertions.assertTrue(editLog.exists(),
+          "Edit log missing after finalizing rolling upgrade");
     }
   }
 
@@ -446,7 +446,7 @@
       logFile = getLogFile(currentDir, startTxId);
     }
     File deleteFile = logFile.getFile();
-    Assert.assertTrue("Couldn't delete edit log file", deleteFile.delete());
+    Assertions.assertTrue(deleteFile.delete(), "Couldn't delete edit log file");
 
     return deleteFile;
   }
@@ -521,7 +521,7 @@
     long lastWrittenTxId = dfsCluster.getNameNode(activeNNindex).getFSImage()
         .getEditLog().getLastWrittenTxId();
     for (int i = 1; i <= numEdits; i++) {
-      Assert.assertTrue("Failed to do an edit", doAnEdit());
+      Assertions.assertTrue(doAnEdit(), "Failed to do an edit");
     }
     dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
     return lastWrittenTxId;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
index 2a178a15..eb49027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
@@ -32,16 +32,13 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.createGabageTxns;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.createTxnData;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 /**
@@ -56,7 +53,7 @@
       PathUtils.getTestDir(TestJournaledEditsCache.class, false);
   private JournaledEditsCache cache;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
@@ -65,7 +62,7 @@
     TEST_DIR.mkdirs();
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     FileUtils.deleteQuietly(TEST_DIR);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
index 077b103..2395493f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
@@ -20,9 +20,7 @@
 
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -56,10 +54,10 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDelegationToken {
   private MiniDFSCluster cluster;
@@ -68,7 +66,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestDelegationToken.class);
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
@@ -83,7 +81,7 @@
         cluster.getNamesystem());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if(cluster!=null) {
       cluster.shutdown();
@@ -105,7 +103,7 @@
     // Fake renewer should not be able to renew
     try {
   	  dtSecretManager.renewToken(token, "FakeRenewer");
-  	  Assert.fail("should have failed");
+  	  Assertions.fail("should have failed");
     } catch (AccessControlException ace) {
       // PASS
     }
@@ -114,14 +112,14 @@
     byte[] tokenId = token.getIdentifier();
     identifier.readFields(new DataInputStream(
              new ByteArrayInputStream(tokenId)));
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    Assertions.assertTrue(null != dtSecretManager.retrievePassword(identifier));
     LOG.info("Sleep to expire the token");
 	  Thread.sleep(6000);
 	  //Token should be expired
 	  try {
 	    dtSecretManager.retrievePassword(identifier);
 	    //Should not come here
-	    Assert.fail("Token should have expired");
+	    Assertions.fail("Token should have expired");
 	  } catch (InvalidToken e) {
 	    //Success
 	  }
@@ -130,7 +128,7 @@
 	  Thread.sleep(5000);
 	  try {
   	  dtSecretManager.renewToken(token, "JobTracker");
-  	  Assert.fail("should have been expired");
+  	  Assertions.fail("should have been expired");
 	  } catch (InvalidToken it) {
 	    // PASS
 	  }
@@ -143,14 +141,14 @@
     //Fake renewer should not be able to renew
     try {
       dtSecretManager.cancelToken(token, "FakeCanceller");
-      Assert.fail("should have failed");
+      Assertions.fail("should have failed");
     } catch (AccessControlException ace) {
       // PASS
     }
     dtSecretManager.cancelToken(token, "JobTracker");
     try {
       dtSecretManager.renewToken(token, "JobTracker");
-      Assert.fail("should have failed");
+      Assertions.fail("should have failed");
     } catch (InvalidToken it) {
       // PASS
     }
@@ -182,13 +180,13 @@
     DistributedFileSystem dfs = cluster.getFileSystem();
     Credentials creds = new Credentials();
     final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
-    Assert.assertEquals(1, tokens.length);
-    Assert.assertEquals(1, creds.numberOfTokens());
+    Assertions.assertEquals(1, tokens.length);
+    Assertions.assertEquals(1, creds.numberOfTokens());
     checkTokenIdentifier(ugi, tokens[0]);
 
     final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
-    Assert.assertEquals(0, tokens2.length); // already have token
-    Assert.assertEquals(1, creds.numberOfTokens());
+    Assertions.assertEquals(0, tokens2.length); // already have token
+    Assertions.assertEquals(1, creds.numberOfTokens());
   }
   
   @Test
@@ -210,12 +208,12 @@
     { //test addDelegationTokens(..)
       Credentials creds = new Credentials();
       final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
-      Assert.assertEquals(1, tokens.length);
-      Assert.assertEquals(1, creds.numberOfTokens());
-      Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
+      Assertions.assertEquals(1, tokens.length);
+      Assertions.assertEquals(1, creds.numberOfTokens());
+      Assertions.assertSame(tokens[0], creds.getAllTokens().iterator().next());
       checkTokenIdentifier(ugi, tokens[0]);
       final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
-      Assert.assertEquals(0, tokens2.length);
+      Assertions.assertEquals(0, tokens2.length);
     }
   }
 
@@ -224,7 +222,7 @@
     final DistributedFileSystem dfs = cluster.getFileSystem();
     final Credentials creds = new Credentials();
     final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
-    Assert.assertEquals(1, tokens.length);
+    Assertions.assertEquals(1, tokens.length);
     @SuppressWarnings("unchecked")
     final Token<DelegationTokenIdentifier> token =
         (Token<DelegationTokenIdentifier>) tokens[0];
@@ -238,7 +236,7 @@
         try {
           token.renew(config);
         } catch (Exception e) {
-          Assert.fail("Could not renew delegation token for user "+longUgi);
+          Assertions.fail("Could not renew delegation token for user "+longUgi);
         }
         return null;
       }
@@ -256,7 +254,7 @@
         try {
           token.cancel(config);
         } catch (Exception e) {
-          Assert.fail("Could not cancel delegation token for user "+longUgi);
+          Assertions.fail("Could not cancel delegation token for user "+longUgi);
         }
         return null;
       }
@@ -267,7 +265,7 @@
   public void testDelegationTokenUgi() throws Exception {
     final DistributedFileSystem dfs = cluster.getFileSystem();
     Token<?>[] tokens = dfs.addDelegationTokens("renewer", null);
-    Assert.assertEquals(1, tokens.length);
+    Assertions.assertEquals(1, tokens.length);
     Token<?> token1 = tokens[0];
     DelegationTokenIdentifier ident =
         (DelegationTokenIdentifier) token1.decodeIdentifier();
@@ -278,18 +276,18 @@
     for (int i=0; i<2; i++) {
       DelegationTokenIdentifier identClone =
           (DelegationTokenIdentifier)token1.decodeIdentifier();
-      Assert.assertEquals(ident, identClone);
-      Assert.assertNotSame(ident, identClone);
-      Assert.assertSame(expectedUgi, identClone.getUser());
-      Assert.assertSame(expectedUgi, identClone.getUser());
+      Assertions.assertEquals(ident, identClone);
+      Assertions.assertNotSame(ident, identClone);
+      Assertions.assertSame(expectedUgi, identClone.getUser());
+      Assertions.assertSame(expectedUgi, identClone.getUser());
     }
 
     // a new token must decode to a different ugi instance than the first token
     tokens = dfs.addDelegationTokens("renewer", null);
-    Assert.assertEquals(1, tokens.length);
+    Assertions.assertEquals(1, tokens.length);
     Token<?> token2 = tokens[0];
-    Assert.assertNotEquals(token1, token2);
-    Assert.assertNotSame(expectedUgi, token2.decodeIdentifier().getUser());
+    Assertions.assertNotEquals(token1, token2);
+    Assertions.assertNotSame(expectedUgi, token2.decodeIdentifier().getUser());
   }
 
   /**
@@ -315,17 +313,17 @@
     assertTrue(nn.isInSafeMode());
     DelegationTokenSecretManager sm =
       NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
-    assertFalse("Secret manager should not run in safe mode", sm.isRunning());
+    assertFalse(sm.isRunning(), "Secret manager should not run in safe mode");
     
     NameNodeAdapter.leaveSafeMode(nn);
-    assertTrue("Secret manager should start when safe mode is exited",
-        sm.isRunning());
+    assertTrue(sm.isRunning(),
+        "Secret manager should start when safe mode is exited");
     
     LOG.info("========= entering safemode again");
     
     NameNodeAdapter.enterSafeMode(nn, false);
-    assertFalse("Secret manager should stop again when safe mode " +
-        "is manually entered", sm.isRunning());
+    assertFalse(sm.isRunning(),
+        "Secret manager should stop again when safe mode is manually entered");
     
     // Set the cluster to leave safemode quickly on its own.
     cluster.getConfiguration(0).setInt(
@@ -342,7 +340,7 @@
   @SuppressWarnings("unchecked")
   private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
       throws Exception {
-    Assert.assertNotNull(token);
+    Assertions.assertNotNull(token);
     // should be able to use token.decodeIdentifier() but webhdfs isn't
     // registered with the service loader for token decoding
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
@@ -353,9 +351,9 @@
     } finally {
       in.close();
     }
-    Assert.assertNotNull(identifier);
+    Assertions.assertNotNull(identifier);
     LOG.info("A valid token should have non-null password, and should be renewed successfully");
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    Assertions.assertTrue(null != dtSecretManager.retrievePassword(identifier));
     dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
     ugi.doAs(
         new PrivilegedExceptionAction<Object>() {
@@ -372,7 +370,7 @@
   public void testDelegationTokenIdentifierToString() throws Exception {
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
         "SomeUser"), new Text("JobTracker"), null);
-    Assert.assertEquals("HDFS_DELEGATION_TOKEN token 0" +
+    Assertions.assertEquals("HDFS_DELEGATION_TOKEN token 0" +
         " for SomeUser with renewer JobTracker",
         dtId.toStringStable());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
index d79ec61..ce601270 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
@@ -50,10 +50,10 @@
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestDelegationTokenForProxyUser {
   private static MiniDFSCluster cluster;
@@ -96,7 +96,7 @@
         builder.toString());
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     config = new HdfsConfiguration();
     config.setLong(
@@ -118,7 +118,7 @@
         GROUP_NAMES);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if(cluster!=null) {
       cluster.shutdown();
@@ -139,8 +139,8 @@
       byte[] tokenId = tokens[0].getIdentifier();
       identifier.readFields(new DataInputStream(new ByteArrayInputStream(
           tokenId)));
-      Assert.assertEquals(identifier.getUser().getUserName(), PROXY_USER);
-      Assert.assertEquals(identifier.getUser().getRealUser().getUserName(),
+      Assertions.assertEquals(identifier.getUser().getUserName(), PROXY_USER);
+      Assertions.assertEquals(identifier.getUser().getRealUser().getUserName(),
           REAL_USER);
     } catch (InterruptedException e) {
       //Do Nothing
@@ -161,7 +161,7 @@
     {
       Path responsePath = webhdfs.getHomeDirectory();
       WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
-      Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());
+      Assertions.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());
     }
 
     final Path f = new Path("/testWebHdfsDoAs/a.txt");
@@ -172,7 +172,7 @@
   
       final FileStatus status = webhdfs.getFileStatus(f);
       WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
-      Assert.assertEquals(PROXY_USER, status.getOwner());
+      Assertions.assertEquals(PROXY_USER, status.getOwner());
     }
 
     {
@@ -183,7 +183,7 @@
       final FileStatus status = webhdfs.getFileStatus(f);
       WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
       WebHdfsTestUtil.LOG.info("status.getLen()  =" + status.getLen());
-      Assert.assertEquals(PROXY_USER, status.getOwner());
+      Assertions.assertEquals(PROXY_USER, status.getOwner());
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index d08276b..a5bf2b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -19,11 +19,7 @@
 package org.apache.hadoop.hdfs.security.token.block;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -87,10 +83,10 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -125,7 +121,7 @@
   final ExtendedBlock block2 = new ExtendedBlock("10", 10L);
   final ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
 
-  @Before
+  @BeforeEach
   public void disableKerberos() {
     Configuration conf = new Configuration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
@@ -152,12 +148,12 @@
           (GetReplicaVisibleLengthRequestProto) args[1];
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
-      assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
+      assertEquals(1, tokenIds.size(), "Only one BlockTokenIdentifier expected");
       long result = 0;
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         LOG.info("Got: " + id.toString());
-        assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
+        assertTrue(ident.equals(id), "Received BlockTokenIdentifier is wrong");
         sm.checkAccess(id, null, PBHelperClient.convert(req.getBlock()),
             BlockTokenIdentifier.AccessMode.WRITE,
             new StorageType[]{StorageType.DEFAULT}, null);
@@ -376,7 +372,7 @@
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
 
-    Assume.assumeTrue(FD_DIR.exists());
+    Assumptions.assumeTrue(FD_DIR.exists());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
         enableProtobuf);
@@ -534,7 +530,7 @@
       }
       Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
           .getBlockToken();
-      Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
+      Assertions.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
       out.close();
     } finally {
       cluster.shutdown();
@@ -866,10 +862,10 @@
       int rangeStart = nnIdx * interval;
       for(int i = 0; i < interval * 3; i++) {
         int serialNo = sm.getSerialNoForTesting();
-        assertTrue(
-            "serialNo " + serialNo + " is not in the designated range: [" +
-                rangeStart + ", " + (rangeStart + interval) + ")",
-                serialNo >= rangeStart && serialNo < (rangeStart + interval));
+        assertTrue(
+            serialNo >= rangeStart && serialNo < (rangeStart + interval),
+            "serialNo " + serialNo + " is not in the designated range: [" +
+                rangeStart + ", " + (rangeStart + interval) + ")");
         sm.updateKeys();
       }
     }
@@ -958,10 +954,10 @@
       byte[] readData = new byte[data.length];
       long startTime = System.currentTimeMillis();
       in.read(readData);
-      // DFSInputStream#refetchLocations() minimum wait for 1sec to refetch
-      // complete located blocks.
-      assertTrue("Should not wait for refetch complete located blocks",
-          1000L > (System.currentTimeMillis() - startTime));
+      // DFSInputStream#refetchLocations() minimum wait for 1sec to refetch
+      // complete located blocks.
+      assertTrue(1000L > (System.currentTimeMillis() - startTime),
+          "Should not wait for refetch complete located blocks");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/ITestInMemoryAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/ITestInMemoryAliasMap.java
index 2785f68..91c866d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/ITestInMemoryAliasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/ITestInMemoryAliasMap.java
@@ -22,13 +22,11 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -46,7 +44,7 @@
   private File tempDirectory;
   private static String bpid = "bpid-0";
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = new Configuration();
     File temp = Files.createTempDirectory("seagull").toFile();
@@ -57,7 +55,7 @@
     aliasMap = InMemoryAliasMap.init(conf, bpid);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     aliasMap.close();
     FileUtils.deleteDirectory(tempDirectory);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/TestSecureAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/TestSecureAliasMap.java
index a835301..ef7f7d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/TestSecureAliasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/aliasmap/TestSecureAliasMap.java
@@ -39,18 +39,16 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
 import java.util.Properties;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test DN & NN communication in secured hdfs with alias map.
@@ -66,7 +64,7 @@
   private HdfsConfiguration conf;
   private FileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     baseDir =
         GenericTestUtils.getTestDir(TestSecureAliasMap.class.getSimpleName());
@@ -81,8 +79,8 @@
     SecurityUtil.setAuthenticationMethod(
         UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     String userName = UserGroupInformation.getLoginUser().getShortUserName();
     File keytabFile = new File(baseDir, userName + ".keytab");
@@ -98,7 +96,7 @@
         kdc.getRealm(), keytab, keystoresDir, sslConfDir);
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -107,7 +105,7 @@
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
@@ -146,10 +144,10 @@
     }
 
     String[] bps = providedVolume.getBlockPoolList();
-    assertEquals("Missing provided volume", 1, bps.length);
+    assertEquals(1, bps.length, "Missing provided volume");
 
     BlockAliasMap aliasMap = blockManager.getProvidedStorageMap().getAliasMap();
     BlockAliasMap.Reader reader = aliasMap.getReader(null, bps[0]);
-    assertNotNull("Failed to create blockAliasMap reader", reader);
+    assertNotNull(reader, "Failed to create blockAliasMap reader");
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 2070a33..75aeff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -40,10 +40,7 @@
 
 import java.lang.reflect.Field;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.AfterClass;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyLong;
 import static org.mockito.Mockito.doAnswer;
@@ -68,8 +65,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang3.StringUtils;
-import org.junit.Assert;
-import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -116,8 +111,11 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.slf4j.event.Level;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -150,14 +148,14 @@
   private AtomicLong startGetBlocksTime;
   private AtomicLong endGetBlocksTime;
 
-  @Before
+  @BeforeEach
   public void setup() {
     numGetBlocksCalls = new AtomicInteger(0);
     startGetBlocksTime = new AtomicLong(Long.MAX_VALUE);
     endGetBlocksTime = new AtomicLong(Long.MIN_VALUE);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -230,8 +228,8 @@
         UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
     KerberosName.resetDefaultRealm();
-    assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     keytabFile = new File(baseDir, username + ".keytab");
     String keytab = keytabFile.getAbsolutePath();
@@ -270,7 +268,7 @@
     initConf(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -911,7 +909,7 @@
     tool.setConf(conf);
     final int r = tool.run(args.toArray(new String[0])); // start rebalancing
 
-    assertEquals("Tools should exit 0 on success", 0, r);
+    assertEquals(0, r, "Tools should exit 0 on success");
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
     LOG.info("Rebalancing with default ctor.");
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, expectedExcludedNodes);
@@ -1263,7 +1261,7 @@
       Object hotBlockTimeInterval = field1.get(dispatcher);
       assertEquals(1000, (long)hotBlockTimeInterval);
     } catch (Exception e) {
-      Assert.fail(e.getMessage());
+      Assertions.fail(e.getMessage());
     }
   }
 
@@ -1575,25 +1573,25 @@
         .create(Balancer.BALANCER_ID_PATH, false);
     out.writeBytes(InetAddress.getLocalHost().getHostName());
     out.hflush();
-    assertTrue("'balancer.id' file doesn't exist!",
-        fs.exists(Balancer.BALANCER_ID_PATH));
+    assertTrue(fs.exists(Balancer.BALANCER_ID_PATH),
+        "'balancer.id' file doesn't exist!");
 
     // start second balancer
     final String[] args = { "-policy", "datanode" };
     final Tool tool = new Cli();
     tool.setConf(conf);
     int exitCode = tool.run(args); // start balancing
-    assertEquals("Exit status code mismatches",
-        ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);
+    assertEquals(ExitStatus.IO_EXCEPTION.getExitCode(), exitCode,
+        "Exit status code mismatches");
 
     // Case2: Release lease so that another balancer would be able to
     // perform balancing.
     out.close();
-    assertTrue("'balancer.id' file doesn't exist!",
-        fs.exists(Balancer.BALANCER_ID_PATH));
+    assertTrue(fs.exists(Balancer.BALANCER_ID_PATH),
+        "'balancer.id' file doesn't exist!");
     exitCode = tool.run(args); // start balancing
-    assertEquals("Exit status code mismatches",
-        ExitStatus.SUCCESS.getExitCode(), exitCode);
+    assertEquals(ExitStatus.SUCCESS.getExitCode(), exitCode,
+        "Exit status code mismatches");
   }
 
   public void integrationTestWithStripedFile(Configuration conf) throws Exception {
@@ -1742,15 +1740,15 @@
         // also includes the time it took to perform the block move ops in the
         // first iteration
         new PortNumberBasedNodes(1, 0, 0), false, false, true, 0.5);
-    assertTrue("Number of getBlocks should be not less than " +
-        getBlocksMaxQps, numGetBlocksCalls.get() >= getBlocksMaxQps);
+    assertTrue(numGetBlocksCalls.get() >= getBlocksMaxQps,
+        "Number of getBlocks should be not less than " + getBlocksMaxQps);
     long durationMs = 1 + endGetBlocksTime.get() - startGetBlocksTime.get();
     int durationSec = (int) Math.ceil(durationMs / 1000.0);
     LOG.info("Balancer executed {} getBlocks in {} msec (round up to {} sec)",
         numGetBlocksCalls.get(), durationMs, durationSec);
     long getBlockCallsPerSecond = numGetBlocksCalls.get() / durationSec;
-    assertTrue("Expected balancer getBlocks calls per second <= " +
-        getBlocksMaxQps, getBlockCallsPerSecond <= getBlocksMaxQps);
+    assertTrue(getBlockCallsPerSecond <= getBlocksMaxQps,
+        "Expected balancer getBlocks calls per second <= " + getBlocksMaxQps);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java
index 0d6300c..0265f83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java
@@ -47,8 +47,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -74,8 +74,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Some long running Balancer tasks.
@@ -98,7 +98,7 @@
   private final static Path FILE_PATH = new Path(FILE_NAME);
   private MiniDFSCluster cluster;
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -651,23 +651,23 @@
       maxUsage = Math.max(maxUsage, datanodeReport[i].getDfsUsed());
     }
 
-    // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file.
-    // The HDFS balancer will choose a block to move from this node randomly. More likely it will
-    // be 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY which is 99L,
-    // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B.
-    // However, chances are the first block selected to be moved from this 95% DN is the 50B block.
-    // After this block is moved, the total moved size so far would be 50B which is smaller than
-    // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block.
-    // The second block will always be of size 100B. So total bytes moved from this 95% DN will be
-    // 2 blocks of size (100B + 50B) 150B.
-    // Hence, overall total blocks moved by HDFS balancer would be either of these 2 options:
-    // a) 2 blocks of total size (100B + 100B)
-    // b) 3 blocks of total size (50B + 100B + 100B)
-    assertTrue("BalancerResult is not as expected. " + balancerResult,
-        (balancerResult.getBytesAlreadyMoved() == 200
-            && balancerResult.getBlocksMoved() == 2)
-            || (balancerResult.getBytesAlreadyMoved() == 250
-            && balancerResult.getBlocksMoved() == 3));
+    // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file.
+    // The HDFS balancer will choose a block to move from this node randomly. More likely it will
+    // be a 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY which is 99L,
+    // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B.
+    // However, chances are the first block selected to be moved from this 95% DN is the 50B block.
+    // After this block is moved, the total moved size so far would be 50B which is smaller than
+    // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block.
+    // The second block will always be of size 100B. So total bytes moved from this 95% DN will be
+    // 2 blocks of total size 150B (100B + 50B).
+    // Hence, overall total blocks moved by the HDFS balancer would be either of these 2 options:
+    // a) 2 blocks of total size (100B + 100B)
+    // b) 3 blocks of total size (50B + 100B + 100B)
+    assertTrue((balancerResult.getBytesAlreadyMoved() == 200
+            && balancerResult.getBlocksMoved() == 2)
+            || (balancerResult.getBytesAlreadyMoved() == 250
+            && balancerResult.getBlocksMoved() == 3),
+        "BalancerResult is not as expected. " + balancerResult);
     // 100% and 95% used nodes will be balanced, so top used will be 900
     assertEquals(900, maxUsage);
   }
@@ -723,14 +723,14 @@
           LOG.info("NNC to work on: " + nnc);
           Balancer b = new Balancer(nnc, bParams, conf);
           Balancer.Result r = b.runOneIteration();
-          // Since no block can be moved in 500 milli-seconds (i.e.,
-          // 4MB/s * 0.5s = 2MB < 10MB), NO_MOVE_PROGRESS will be reported.
-          // When a block move is not canceled in 500 ms properly
-          // (highly unlikely) and then a block is moved unexpectedly,
-          // IN_PROGRESS will be reported. This is highly unlikely unexpected
-          // case. See HDFS-15989.
-          assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.",
-              ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus());
+          // Since no block can be moved in 500 milli-seconds (i.e.,
+          // 4MB/s * 0.5s = 2MB < 10MB), NO_MOVE_PROGRESS will be reported.
+          // When a block move is not canceled in 500 ms properly
+          // (highly unlikely) and then a block is moved unexpectedly,
+          // IN_PROGRESS will be reported. This is a highly unlikely,
+          // unexpected case. See HDFS-15989.
+          assertEquals(ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus(),
+              "We expect ExitStatus.NO_MOVE_PROGRESS to be reported.");
           assertEquals(0, r.getBlocksMoved());
         }
       } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java
index 9752d65..26ca047 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerRPCDelay.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 /**
@@ -34,13 +34,13 @@
 
   private TestBalancer testBalancer;
 
-  @Before
+  @BeforeEach
   public void setup() {
     testBalancer = new TestBalancer();
     testBalancer.setup();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     if (testBalancer != null) {
       testBalancer.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
index f1fab27..42d5f81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java
@@ -34,9 +34,9 @@
 
 import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test balancer run as a service.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
index 50d4eae..df3cb0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
@@ -20,14 +20,14 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestBalancerWithEncryptedTransfer {
   
   private final Configuration conf = new HdfsConfiguration();
   
-  @Before
+  @BeforeEach
   public void setUpConf() {
     conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index a9c8136..06dcb85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -20,8 +20,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ALLOW_STALE_READ_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.times;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index e364ae6..db8d59b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -51,8 +51,8 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * Test balancer with multiple NameNodes
@@ -184,7 +184,7 @@
     // start rebalancing
     final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(s.conf);
     final int r = Balancer.run(namenodes, s.parameters, s.conf);
-    Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
+    Assertions.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
 
     LOG.info("BALANCER 2");
     wait(s, totalUsed, totalCapacity);
@@ -201,7 +201,7 @@
       for(int n = 0; n < s.clients.length; n++) {
         final DatanodeInfo[] datanodes = s.clients[n].getDatanodeReport(
             DatanodeReportType.ALL);
-        Assert.assertEquals(datanodes.length, used.length);
+        Assertions.assertEquals(datanodes.length, used.length);
 
         for(int d = 0; d < datanodes.length; d++) {
           if (n == 0) {
@@ -213,8 +213,8 @@
                   + ", getCapacity()=" + datanodes[d].getCapacity());
             }
           } else {
-            Assert.assertEquals(used[d], datanodes[d].getDfsUsed());
-            Assert.assertEquals(cap[d], datanodes[d].getCapacity());
+            Assertions.assertEquals(used[d], datanodes[d].getDfsUsed());
+            Assertions.assertEquals(cap[d], datanodes[d].getCapacity());
           }
           bpUsed[n][d] = datanodes[d].getBlockPoolUsed();
         }
@@ -266,7 +266,7 @@
     // cluster is balanced, verify that only selected blockpools were touched
     Map<Integer, DatanodeStorageReport[]> postBalancerPoolUsages =
         getStorageReports(s);
-    Assert.assertEquals(preBalancerPoolUsages.size(),
+    Assertions.assertEquals(preBalancerPoolUsages.size(),
         postBalancerPoolUsages.size());
     for (Map.Entry<Integer, DatanodeStorageReport[]> entry
         : preBalancerPoolUsages.entrySet()) {
@@ -284,14 +284,14 @@
    */
   private static void compareTotalPoolUsage(DatanodeStorageReport[] preReports,
       DatanodeStorageReport[] postReports) {
-    Assert.assertNotNull(preReports);
-    Assert.assertNotNull(postReports);
-    Assert.assertEquals(preReports.length, postReports.length);
+    Assertions.assertNotNull(preReports);
+    Assertions.assertNotNull(postReports);
+    Assertions.assertEquals(preReports.length, postReports.length);
     for (DatanodeStorageReport preReport : preReports) {
       String dnUuid = preReport.getDatanodeInfo().getDatanodeUuid();
       for(DatanodeStorageReport postReport : postReports) {
         if(postReport.getDatanodeInfo().getDatanodeUuid().equals(dnUuid)) {
-          Assert.assertEquals(getTotalPoolUsage(preReport),
+          Assertions.assertEquals(getTotalPoolUsage(preReport),
               getTotalPoolUsage(postReport));
           LOG.info("Comparision of datanode pool usage pre/post balancer run. "
               + "PrePoolUsage: " + getTotalPoolUsage(preReport)
@@ -490,7 +490,7 @@
     final long[] capacities = new long[nDataNodes];
     Arrays.fill(capacities, CAPACITY);
     LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
-    Assert.assertEquals(nDataNodes, racks.length);
+    Assertions.assertEquals(nDataNodes, racks.length);
 
     LOG.info("RUN_TEST -1: start a cluster with nNameNodes=" + nNameNodes
         + ", nDataNodes=" + nDataNodes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
index 28dc9a0..ba1a517 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
@@ -195,8 +192,8 @@
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
     final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
-    assertEquals("Balancer did not exit with NO_MOVE_PROGRESS",
-        ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
+    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r,
+        "Balancer did not exit with NO_MOVE_PROGRESS");
     waitForHeartBeat(totalUsedSpace, totalCapacity);
     LOG.info("Rebalancing with default factor.");
   }
@@ -218,8 +215,8 @@
     NetworkTopology topology =
         cluster.getNamesystem().getBlockManager().getDatanodeManager().
             getNetworkTopology();
-    assertTrue("must be an instance of NetworkTopologyWithNodeGroup",
-        topology instanceof NetworkTopologyWithNodeGroup);
+    assertTrue(topology instanceof NetworkTopologyWithNodeGroup,
+        "must be an instance of NetworkTopologyWithNodeGroup");
   }
 
   private void verifyProperBlockPlacement(String file,
@@ -228,13 +225,13 @@
         cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy();
     List<LocatedBlock> locatedBlocks = client.
         getBlockLocations(file, 0, length).getLocatedBlocks();
-    assertFalse("No blocks found for file " + file, locatedBlocks.isEmpty());
+    assertFalse(locatedBlocks.isEmpty(), "No blocks found for file " + file);
     for (LocatedBlock locatedBlock : locatedBlocks) {
       BlockPlacementStatus status = placementPolicy.verifyBlockPlacement(
           locatedBlock.getLocations(), numOfReplicas);
-      assertTrue("Block placement policy was not satisfied for block " +
-          locatedBlock.getBlock().getBlockId(),
-          status.isPlacementPolicySatisfied());
+      assertTrue(status.isPlacementPolicySatisfied(),
+          "Block placement policy was not satisfied for block " +
+          locatedBlock.getBlock().getBlockId());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
index b579c89..19e3f8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestBalancerWithSaslDataTransfer extends SaslDataTransferTestCase {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
index cc26702..a0301b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
@@ -26,12 +26,12 @@
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.FakeTimer;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -70,8 +70,8 @@
         "timer", fakeTimer);
     final DataEncryptionKey dek = keyManager.newDataEncryptionKey();
     final long remainingTime = dek.expiryDate - fakeTimer.now();
-    assertEquals("KeyManager dataEncryptionKey should expire in 2 seconds",
-        keyUpdateInterval, remainingTime);
+    assertEquals(keyUpdateInterval, remainingTime,
+        "KeyManager dataEncryptionKey should expire in 2 seconds");
     // advance the timer to expire the block key and data encryption key
     fakeTimer.advance(keyUpdateInterval + 1);
 
@@ -79,9 +79,9 @@
     // regenerate a valid data encryption key using the current block key.
     final DataEncryptionKey dekAfterExpiration =
         keyManager.newDataEncryptionKey();
-    assertNotEquals("KeyManager should generate a new data encryption key",
-        dek, dekAfterExpiration);
-    assertTrue("KeyManager has an expired DataEncryptionKey!",
-        dekAfterExpiration.expiryDate > fakeTimer.now());
+    assertNotEquals(dek, dekAfterExpiration,
+        "KeyManager should generate a new data encryption key");
+    assertTrue(dekAfterExpiration.expiryDate > fakeTimer.now(),
+        "KeyManager has an expired DataEncryptionKey!");
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
index ec86093..32ca90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
@@ -35,8 +35,8 @@
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 abstract public class BaseReplicationPolicyTest {
@@ -68,7 +68,7 @@
 
   abstract DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf);
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     Configuration conf = new HdfsConfiguration();
     dataNodes = getDatanodeDescriptors(conf);
@@ -111,7 +111,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     namenode.stop();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 2b8804c..36f8cbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -34,8 +34,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
-
+import org.junit.jupiter.api.Assertions;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 
 public class BlockManagerTestUtil {
@@ -242,7 +241,7 @@
           theDND = dnd;
         }
       }
-      Assert.assertNotNull("Could not find DN with name: " + dnName, theDND);
+      Assertions.assertNotNull(theDND, "Could not find DN with name: " + dnName);
       
       synchronized (hbm) {
         DFSTestUtil.setDatanodeDead(theDND);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBPPBalanceLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBPPBalanceLocal.java
index a5920c4..ae8a2b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBPPBalanceLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBPPBalanceLocal.java
@@ -28,9 +28,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -53,7 +53,7 @@
   private static NameNode namenode;
   private static NetworkTopology cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws Exception {
     conf = new HdfsConfiguration();
     conf.setFloat(
@@ -137,8 +137,8 @@
               .chooseTarget(FILE, 1, localNode,
                   new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
                   TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
-      Assert.assertEquals(1, targets.length);
-      Assert.assertEquals(localNode, targets[0].getDatanodeDescriptor());
+      Assertions.assertEquals(1, targets.length);
+      Assertions.assertEquals(localNode, targets[0].getDatanodeDescriptor());
     }
   }
 
@@ -154,11 +154,11 @@
                   new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
                   TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
 
-      Assert.assertEquals(1, targets.length);
+      Assertions.assertEquals(1, targets.length);
       if (localNode == targets[0].getDatanodeDescriptor()) {
         numLocalChosen++;
       }
     }
-    Assert.assertTrue(numLocalChosen < (CHOOSE_TIMES - numLocalChosen));
+    Assertions.assertTrue(numLocalChosen < (CHOOSE_TIMES - numLocalChosen));
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
index f58961e..2d1de6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
@@ -34,11 +34,11 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestAvailableSpaceBlockPlacementPolicy {
   private final static int numRacks = 4;
@@ -55,7 +55,7 @@
   private static BlockPlacementPolicy placementPolicy;
   private static NetworkTopology cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws Exception {
     conf = new HdfsConfiguration();
     conf.setFloat(
@@ -173,7 +173,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() {
     if (namenode != null) {
       namenode.stop();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceRackFaultTolerantBPP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceRackFaultTolerantBPP.java
index 179c6c6..6941b75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceRackFaultTolerantBPP.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceRackFaultTolerantBPP.java
@@ -29,10 +29,10 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -40,7 +40,7 @@
 import java.util.Collections;
 import java.util.HashSet;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Tests AvailableSpaceRackFaultTolerant block placement policy.
@@ -60,7 +60,7 @@
   private static BlockPlacementPolicy placementPolicy;
   private static NetworkTopology cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws Exception {
     conf = new HdfsConfiguration();
     conf.setFloat(
@@ -139,7 +139,7 @@
    */
   @Test
   public void testPolicyReplacement() {
-    Assert.assertTrue(
+    Assertions.assertTrue(
         (placementPolicy instanceof
             AvailableSpaceRackFaultTolerantBlockPlacementPolicy));
   }
@@ -159,7 +159,7 @@
                   new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
                   TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
 
-      Assert.assertTrue(targets.length == REPLICA);
+      Assertions.assertTrue(targets.length == REPLICA);
       for (int j = 0; j < REPLICA; j++) {
         total++;
         if (targets[j].getDatanodeDescriptor().getRemainingPercent() > 60) {
@@ -167,10 +167,10 @@
         }
       }
     }
-    Assert.assertTrue(total == REPLICA * CHOOSE_TIMES);
+    Assertions.assertTrue(total == REPLICA * CHOOSE_TIMES);
     double possibility = 1.0 * moreRemainingNode / total;
-    Assert.assertTrue(possibility > 0.52);
-    Assert.assertTrue(possibility < 0.55);
+    Assertions.assertTrue(possibility > 0.52);
+    Assertions.assertTrue(possibility < 0.55);
   }
 
   @Test
@@ -184,7 +184,7 @@
             .chooseDataNode("~", allNodes);
       }
     } catch (NullPointerException npe) {
-      Assert.fail("NPE should not be thrown");
+      Assertions.fail("NPE should not be thrown");
     }
   }
 
@@ -206,7 +206,7 @@
     assertEquals(REPLICA, racks.size());
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() {
     if (namenode != null) {
       namenode.stop();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index 70f13eb..c2475fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -36,8 +37,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 
 /**
@@ -56,9 +57,9 @@
     BlockInfo blockInfo = new BlockInfoContiguous((short) 3);
     BlockCollection bc = Mockito.mock(BlockCollection.class);
     blockInfo.setBlockCollectionId(1000);
-    Assert.assertFalse(blockInfo.isDeleted());
+    Assertions.assertFalse(blockInfo.isDeleted());
     blockInfo.setBlockCollectionId(INVALID_INODE_ID);
-    Assert.assertTrue(blockInfo.isDeleted());
+    Assertions.assertTrue(blockInfo.isDeleted());
   }
 
   @Test
@@ -70,8 +71,8 @@
 
     boolean added = blockInfo.addStorage(storage, blockInfo);
 
-    Assert.assertTrue(added);
-    Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
+    Assertions.assertTrue(added);
+    Assertions.assertEquals(storage, blockInfo.getStorageInfo(0));
   }
 
   @Test
@@ -81,9 +82,9 @@
     DatanodeStorageInfo providedStorage = mock(DatanodeStorageInfo.class);
     when(providedStorage.getStorageType()).thenReturn(StorageType.PROVIDED);
     boolean added = blockInfo.addStorage(providedStorage, blockInfo);
-    Assert.assertTrue(added);
-    Assert.assertEquals(providedStorage, blockInfo.getStorageInfo(0));
-    Assert.assertTrue(blockInfo.isProvided());
+    Assertions.assertTrue(added);
+    Assertions.assertEquals(providedStorage, blockInfo.getStorageInfo(0));
+    Assertions.assertTrue(blockInfo.isProvided());
   }
 
   @Test
@@ -95,16 +96,16 @@
     when(diskStorage.getDatanodeDescriptor()).thenReturn(mockDN);
     when(diskStorage.getStorageType()).thenReturn(StorageType.DISK);
     boolean added = blockInfo.addStorage(diskStorage, blockInfo);
-    Assert.assertTrue(added);
-    Assert.assertEquals(diskStorage, blockInfo.getStorageInfo(0));
-    Assert.assertFalse(blockInfo.isProvided());
+    Assertions.assertTrue(added);
+    Assertions.assertEquals(diskStorage, blockInfo.getStorageInfo(0));
+    Assertions.assertFalse(blockInfo.isProvided());
 
     // now add provided storage
     DatanodeStorageInfo providedStorage = mock(DatanodeStorageInfo.class);
     when(providedStorage.getStorageType()).thenReturn(StorageType.PROVIDED);
     added = blockInfo.addStorage(providedStorage, blockInfo);
-    Assert.assertTrue(added);
-    Assert.assertTrue(blockInfo.isProvided());
+    Assertions.assertTrue(added);
+    Assertions.assertTrue(blockInfo.isProvided());
   }
 
   @Test
@@ -127,8 +128,8 @@
     // Try to move one of the blocks to a different storage.
     boolean added =
         storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
-    Assert.assertThat(added, is(false));
-    Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
+    assertThat(added, is(false));
+    assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
   }
 
   @Test(expected=IllegalArgumentException.class)
@@ -160,21 +161,21 @@
       blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3));
       dd.addBlock(blockInfoList.get(i));
 
-      // index of the datanode should be 0
-      assertEquals("Find datanode should be 0", 0, blockInfoList.get(i)
-          .findStorageInfo(dd));
+      // index of the datanode should be 0
+      assertEquals(0, blockInfoList.get(i).findStorageInfo(dd),
+          "Find datanode should be 0");
     }
 
     // list length should be equal to the number of blocks we inserted
     LOG.info("Checking list length...");
-    assertEquals("Length should be MAX_BLOCK", maxBlocks, dd.numBlocks());
+    assertEquals(maxBlocks, dd.numBlocks(), "Length should be MAX_BLOCK");
     Iterator<BlockInfo> it = dd.getBlockIterator();
     int len = 0;
     while (it.hasNext()) {
       it.next();
       len++;
     }
-    assertEquals("There should be MAX_BLOCK blockInfo's", maxBlocks, len);
+    assertEquals(maxBlocks, len, "There should be MAX_BLOCK blockInfo's");
 
     headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
 
@@ -182,9 +183,9 @@
     for (int i = 0; i < maxBlocks; i++) {
       curIndex = blockInfoList.get(i).findStorageInfo(dd);
       headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
-      // the moved element must be at the head of the list
-      assertEquals("Block should be at the head of the list now.",
-          blockInfoList.get(i), dd.getBlockListHeadForTesting());
+      // the moved element must be at the head of the list
+      assertEquals(blockInfoList.get(i), dd.getBlockListHeadForTesting(),
+          "Block should be at the head of the list now.");
     }
 
     // move head of the list to the head - this should not change the list
@@ -194,18 +195,18 @@
     curIndex = 0;
     headIndex = 0;
     dd.moveBlockToHead(temp, curIndex, headIndex);
-    assertEquals(
-        "Moving head to the head of the list shopuld not change the list",
-        temp, dd.getBlockListHeadForTesting());
+    assertEquals(
+        temp, dd.getBlockListHeadForTesting(),
+        "Moving head to the head of the list should not change the list");
 
     // check all elements of the list against the original blockInfoList
     LOG.info("Checking elements of the list...");
     temp = dd.getBlockListHeadForTesting();
-    assertNotNull("Head should not be null", temp);
+    assertNotNull(temp, "Head should not be null");
     int c = maxBlocks - 1;
     while (temp != null) {
-      assertEquals("Expected element is not on the list",
-          blockInfoList.get(c--), temp);
+        assertEquals(
+      assertEquals(blockInfoList.get(c--), temp,
+          "Expected element is not on the list");
     }
 
@@ -216,9 +217,9 @@
       int j = rand.nextInt(maxBlocks);
       curIndex = blockInfoList.get(j).findStorageInfo(dd);
       headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
-      // the moved element must be at the head of the list
-      assertEquals("Block should be at the head of the list now.",
-          blockInfoList.get(j), dd.getBlockListHeadForTesting());
+      // the moved element must be at the head of the list
+      assertEquals(blockInfoList.get(j), dd.getBlockListHeadForTesting(),
+          "Block should be at the head of the list now.");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index 878edf2..c6cac4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -30,9 +30,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -45,10 +45,10 @@
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test {@link BlockInfoStriped}.
@@ -97,38 +94,38 @@
     int i = 0;
     for (; i < storageInfos.length; i += 2) {
       info.addStorage(storageInfos[i], blocks[i]);
-      Assert.assertEquals(i/2 + 1, info.numNodes());
+      Assertions.assertEquals(i/2 + 1, info.numNodes());
     }
     i /= 2;
     for (int j = 1; j < storageInfos.length; j += 2) {
-      Assert.assertTrue(info.addStorage(storageInfos[j], blocks[j]));
-      Assert.assertEquals(i + (j+1)/2, info.numNodes());
+      Assertions.assertTrue(info.addStorage(storageInfos[j], blocks[j]));
+      Assertions.assertEquals(i + (j+1)/2, info.numNodes());
     }
 
     // check
     byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
-    Assert.assertEquals(totalBlocks, info.getCapacity());
-    Assert.assertEquals(totalBlocks, indices.length);
+    Assertions.assertEquals(totalBlocks, info.getCapacity());
+    Assertions.assertEquals(totalBlocks, indices.length);
     i = 0;
     for (DatanodeStorageInfo storage : storageInfos) {
       int index = info.findStorageInfo(storage);
-      Assert.assertEquals(i++, index);
-      Assert.assertEquals(index, indices[index]);
+      Assertions.assertEquals(i++, index);
+      Assertions.assertEquals(index, indices[index]);
     }
 
     // the same block is reported from the same storage twice
     i = 0;
     for (DatanodeStorageInfo storage : storageInfos) {
-      Assert.assertTrue(info.addStorage(storage, blocks[i++]));
+      Assertions.assertTrue(info.addStorage(storage, blocks[i++]));
     }
-    Assert.assertEquals(totalBlocks, info.getCapacity());
-    Assert.assertEquals(totalBlocks, info.numNodes());
-    Assert.assertEquals(totalBlocks, indices.length);
+    Assertions.assertEquals(totalBlocks, info.getCapacity());
+    Assertions.assertEquals(totalBlocks, info.numNodes());
+    Assertions.assertEquals(totalBlocks, indices.length);
     i = 0;
     for (DatanodeStorageInfo storage : storageInfos) {
       int index = info.findStorageInfo(storage);
-      Assert.assertEquals(i++, index);
-      Assert.assertEquals(index, indices[index]);
+      Assertions.assertEquals(i++, index);
+      Assertions.assertEquals(index, indices[index]);
     }
 
     // the same block is reported from another storage
@@ -137,15 +134,15 @@
     // only add the second half of info2
     for (i = totalBlocks; i < storageInfos2.length; i++) {
       info.addStorage(storageInfos2[i], blocks[i % totalBlocks]);
-      Assert.assertEquals(i + 1, info.getCapacity());
-      Assert.assertEquals(i + 1, info.numNodes());
+      Assertions.assertEquals(i + 1, info.getCapacity());
+      Assertions.assertEquals(i + 1, info.numNodes());
       indices = (byte[]) Whitebox.getInternalState(info, "indices");
-      Assert.assertEquals(i + 1, indices.length);
+      Assertions.assertEquals(i + 1, indices.length);
     }
     for (i = totalBlocks; i < storageInfos2.length; i++) {
       int index = info.findStorageInfo(storageInfos2[i]);
-      Assert.assertEquals(i++, index);
-      Assert.assertEquals(index - totalBlocks, indices[index]);
+      Assertions.assertEquals(i++, index);
+      Assertions.assertEquals(index - totalBlocks, indices[index]);
     }
   }
 
@@ -164,17 +161,17 @@
     info.removeStorage(storages[2]);
 
     // check
-    Assert.assertEquals(totalBlocks, info.getCapacity());
-    Assert.assertEquals(totalBlocks - 2, info.numNodes());
+    Assertions.assertEquals(totalBlocks, info.getCapacity());
+    Assertions.assertEquals(totalBlocks - 2, info.numNodes());
     byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
     for (int i = 0; i < storages.length; i++) {
       int index = info.findStorageInfo(storages[i]);
       if (i != 0 && i != 2) {
-        Assert.assertEquals(i, index);
-        Assert.assertEquals(index, indices[index]);
+        Assertions.assertEquals(i, index);
+        Assertions.assertEquals(index, indices[index]);
       } else {
-        Assert.assertEquals(-1, index);
-        Assert.assertEquals(-1, indices[i]);
+        Assertions.assertEquals(-1, index);
+        Assertions.assertEquals(-1, indices[i]);
       }
     }
 
@@ -185,17 +182,17 @@
       info.addStorage(storages2[i], blocks[i % totalBlocks]);
     }
     // now we should have 8 storages
-    Assert.assertEquals(totalBlocks * 2 - 2, info.numNodes());
-    Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
+    Assertions.assertEquals(totalBlocks * 2 - 2, info.numNodes());
+    Assertions.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
     indices = (byte[]) Whitebox.getInternalState(info, "indices");
-    Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
+    Assertions.assertEquals(totalBlocks * 2 - 2, indices.length);
     int j = totalBlocks;
     for (int i = totalBlocks; i < storages2.length; i++) {
       int index = info.findStorageInfo(storages2[i]);
       if (i == totalBlocks || i == totalBlocks + 2) {
-        Assert.assertEquals(i - totalBlocks, index);
+        Assertions.assertEquals(i - totalBlocks, index);
       } else {
-        Assert.assertEquals(j++, index);
+        Assertions.assertEquals(j++, index);
       }
     }
 
@@ -204,22 +201,22 @@
       info.removeStorage(storages2[i + totalBlocks]);
     }
     // now we should have 3 storages
-    Assert.assertEquals(totalBlocks - 2, info.numNodes());
-    Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
+    Assertions.assertEquals(totalBlocks - 2, info.numNodes());
+    Assertions.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
     indices = (byte[]) Whitebox.getInternalState(info, "indices");
-    Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
+    Assertions.assertEquals(totalBlocks * 2 - 2, indices.length);
     for (int i = 0; i < totalBlocks; i++) {
       if (i == 0 || i == 2) {
         int index = info.findStorageInfo(storages2[i + totalBlocks]);
-        Assert.assertEquals(-1, index);
+        Assertions.assertEquals(-1, index);
       } else {
         int index = info.findStorageInfo(storages[i]);
-        Assert.assertEquals(i, index);
+        Assertions.assertEquals(i, index);
       }
     }
     for (int i = totalBlocks; i < totalBlocks * 2 - 2; i++) {
-      Assert.assertEquals(-1, indices[i]);
-      Assert.assertNull(info.getDatanode(i));
+      Assertions.assertEquals(-1, indices[i]);
+      Assertions.assertNull(info.getDatanode(i));
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index d5e0a99..5193c95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -82,9 +82,9 @@
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
 import org.slf4j.event.Level;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import java.io.BufferedReader;
@@ -115,11 +115,11 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -152,7 +148,7 @@
   private long mockINodeId;
 
 
-  @Before
+  @BeforeEach
   public void setupMockCluster() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
@@ -229,12 +225,12 @@
 
     DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
     assertEquals(2, pipeline.length);
-    assertTrue("Source of replication should be one of the nodes the block " +
-        "was on. Was: " + pipeline[0],
-        origStorages.contains(pipeline[0]));
-    assertTrue("Destination of replication should be on the other rack. " +
-        "Was: " + pipeline[1],
-        rackB.contains(pipeline[1].getDatanodeDescriptor()));
+    assertTrue(origStorages.contains(pipeline[0]),
+        "Source of replication should be one of the nodes the block " +
+        "was on. Was: " + pipeline[0]);
+    assertTrue(rackB.contains(pipeline[1].getDatanodeDescriptor()),
+        "Destination of replication should be on the other rack. " +
+        "Was: " + pipeline[1]);
   }
   
 
@@ -263,10 +259,10 @@
     List<DatanodeDescriptor> decomNodes = startDecommission(0, 1);
     
     DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
-    assertTrue("Source of replication should be one of the nodes the block " +
-        "was on. Was: " + pipeline[0],
-        origStorages.contains(pipeline[0]));
-    assertEquals("Should have three targets", 3, pipeline.length);
+    assertTrue(origStorages.contains(pipeline[0]),
+        "Source of replication should be one of the nodes the block " +
+        "was on. Was: " + pipeline[0]);
+    assertEquals(3, pipeline.length, "Should have three targets");
     
     boolean foundOneOnRackA = false;
     for (int i = 1; i < pipeline.length; i++) {
@@ -277,10 +273,10 @@
       assertFalse(decomNodes.contains(target));
       assertFalse(origNodes.contains(target));
     }
-    
-    assertTrue("Should have at least one target on rack A. Pipeline: " +
-        Joiner.on(",").join(pipeline),
-        foundOneOnRackA);
+
+    assertTrue(foundOneOnRackA,
+        "Should have at least one target on rack A. Pipeline: " +
+        Joiner.on(",").join(pipeline));
   }
   
 
@@ -307,10 +303,10 @@
     List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 3);
     
     DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
-    assertTrue("Source of replication should be one of the nodes the block " +
-        "was on. Was: " + pipeline[0],
-        origStorages.contains(pipeline[0]));
-    assertEquals("Should have three targets", 4, pipeline.length);
+    assertTrue(origStorages.contains(pipeline[0]),
+        "Source of replication should be one of the nodes the block " +
+        "was on. Was: " + pipeline[0]);
+    assertEquals(4, pipeline.length, "Should have four targets");
     
     boolean foundOneOnRackA = false;
     boolean foundOneOnRackB = false;
@@ -324,13 +320,13 @@
       assertFalse(decomNodes.contains(target));
       assertFalse(origNodes.contains(target));
     }
-    
-    assertTrue("Should have at least one target on rack A. Pipeline: " +
-        Joiner.on(",").join(pipeline),
-        foundOneOnRackA);
-    assertTrue("Should have at least one target on rack B. Pipeline: " +
-        Joiner.on(",").join(pipeline),
-        foundOneOnRackB);
+
+    assertTrue(foundOneOnRackA,
+        "Should have at least one target on rack A. Pipeline: " +
+        Joiner.on(",").join(pipeline));
+    assertTrue(foundOneOnRackB,
+        "Should have at least one target on rack B. Pipeline: " +
+        Joiner.on(",").join(pipeline));
   }
 
   /**
@@ -362,11 +358,11 @@
     List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);
     
     DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
-    assertTrue("Source of replication should be one of the nodes the block " +
-        "was on. Was: " + pipeline[0],
-        origStorages.contains(pipeline[0]));
-    // Only up to two nodes can be picked per rack when there are two racks.
-    assertEquals("Should have two targets", 2, pipeline.length);
+    assertTrue(origStorages.contains(pipeline[0]),
+        "Source of replication should be one of the nodes the block " +
+        "was on. Was: " + pipeline[0]);
+    // Only up to two nodes can be picked per rack when there are two racks.
+    assertEquals(2, pipeline.length, "Should have two targets");
     
     boolean foundOneOnRackB = false;
     for (int i = 1; i < pipeline.length; i++) {
@@ -377,10 +373,10 @@
       assertFalse(decomNodes.contains(target));
       assertFalse(origNodes.contains(target));
     }
-    
-    assertTrue("Should have at least one target on rack B. Pipeline: " +
-        Joiner.on(",").join(pipeline),
-        foundOneOnRackB);
+
+    assertTrue(foundOneOnRackB,
+        "Should have at least one target on rack B. Pipeline: " +
+        Joiner.on(",").join(pipeline));
     
     // Mark the block as received on the target nodes in the pipeline
     fulfillPipeline(blockInfo, pipeline);
@@ -419,12 +415,12 @@
     DatanodeStorageInfo pipeline[] = scheduleSingleReplication(blockInfo);
     
     assertEquals(2, pipeline.length); // single new copy
-    assertTrue("Source of replication should be one of the nodes the block " +
-        "was on. Was: " + pipeline[0],
-        origNodes.contains(pipeline[0].getDatanodeDescriptor()));
-    assertTrue("Destination of replication should be on the other rack. " +
-        "Was: " + pipeline[1],
-        rackB.contains(pipeline[1].getDatanodeDescriptor()));
+    assertTrue(origNodes.contains(pipeline[0].getDatanodeDescriptor()),
+        "Source of replication should be one of the nodes the block " +
+        "was on. Was: " + pipeline[0]);
+    assertTrue(rackB.contains(pipeline[1].getDatanodeDescriptor()),
+        "Destination of replication should be on the other rack. " +
+        "Was: " + pipeline[1]);
   }
   
   @Test
@@ -634,13 +630,13 @@
     list_all.add(new ArrayList<BlockInfo>()); // for priority 0
     list_all.add(list_p1); // for priority 1
 
-    assertEquals("Block not initially pending reconstruction", 0,
-        bm.pendingReconstruction.getNumReplicas(block));
-    assertEquals(
-        "computeBlockReconstructionWork should indicate reconstruction is needed",
-        1, bm.computeReconstructionWorkForBlocks(list_all));
-    assertTrue("reconstruction is pending after work is computed",
-        bm.pendingReconstruction.getNumReplicas(block) > 0);
+    assertEquals(0, bm.pendingReconstruction.getNumReplicas(block),
+        "Block not initially pending reconstruction");
+    assertEquals(1, bm.computeReconstructionWorkForBlocks(list_all),
+        "computeBlockReconstructionWork should indicate"
+            + " reconstruction is needed");
+    assertTrue(bm.pendingReconstruction.getNumReplicas(block) > 0,
+        "reconstruction is pending after work is computed");
 
     LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls =
         getAllPendingReconstruction();
@@ -690,44 +686,44 @@
     List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
     List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
 
-    assertNotNull("Chooses source node for a highest-priority replication"
-        + " even if all available source nodes have reached their replication"
-        + " limits below the hard limit.",
-        bm.chooseSourceDatanodes(
-            bm.getStoredBlock(aBlock),
-            cntNodes,
-            liveNodes,
-            new NumberReplicas(),
-            new ArrayList<Byte>(),
-            new ArrayList<Byte>(),
-            LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY)[0]);
+    assertNotNull(
+        bm.chooseSourceDatanodes(
+            bm.getStoredBlock(aBlock),
+            cntNodes, liveNodes,
+            new NumberReplicas(),
+            new ArrayList<Byte>(),
+            new ArrayList<Byte>(),
+            LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY)[0],
+        "Chooses source node for a highest-priority replication"
+            + " even if all available source nodes have reached their"
+            + " replication limits below the hard limit.");
 
-    assertEquals("Does not choose a source node for a less-than-highest-priority"
-            + " replication since all available source nodes have reached"
-            + " their replication limits.", 0,
-        bm.chooseSourceDatanodes(
-            bm.getStoredBlock(aBlock),
-            cntNodes,
-            liveNodes,
-            new NumberReplicas(),
-            new ArrayList<Byte>(),
-            new ArrayList<Byte>(),
-            LowRedundancyBlocks.QUEUE_VERY_LOW_REDUNDANCY).length);
+    assertEquals(0,
+        bm.chooseSourceDatanodes(
+            bm.getStoredBlock(aBlock),
+            cntNodes, liveNodes,
+            new NumberReplicas(),
+            new ArrayList<Byte>(),
+            new ArrayList<Byte>(),
+            LowRedundancyBlocks.QUEUE_VERY_LOW_REDUNDANCY).length,
+        "Does not choose a source node for a less-than-highest-priority"
+            + " replication since all available source nodes have reached"
+            + " their replication limits.");
 
     // Increase the replication count to test replication count > hard limit
     DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
     origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
 
-    assertEquals("Does not choose a source node for a highest-priority"
-            + " replication when all available nodes exceed the hard limit.", 0,
-        bm.chooseSourceDatanodes(
-            bm.getStoredBlock(aBlock),
-            cntNodes,
-            liveNodes,
-            new NumberReplicas(),
-            new ArrayList<Byte>(),
-            new ArrayList<Byte>(),
-            LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY).length);
+    assertEquals(0,
+        bm.chooseSourceDatanodes(
+            bm.getStoredBlock(aBlock),
+            cntNodes, liveNodes,
+            new NumberReplicas(),
+            new ArrayList<Byte>(),
+            new ArrayList<Byte>(),
+            LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY).length,
+        "Does not choose a source node for a highest-priority"
+            + " replication when all available nodes exceed the hard limit.");
   }
 
   @Test
@@ -782,15 +778,15 @@
             liveBusyBlockIndices,
             LowRedundancyBlocks.QUEUE_VERY_LOW_REDUNDANCY);
 
-    assertEquals("Choose the source node for reconstruction with one node reach"
-            + " the MAX maxReplicationStreams, the numReplicas still return the"
-            + " correct live replicas.", 4,
-            numReplicas.liveReplicas());
+    assertEquals(4, numReplicas.liveReplicas(),
+        "Choose the source node for reconstruction with one node reach"
+            + " the MAX maxReplicationStreams, the numReplicas still return"
+            + " the correct live replicas.");
 
-    assertEquals("Choose the source node for reconstruction with one node reach"
-            + " the MAX maxReplicationStreams, the numReplicas should return"
-            + " the correct redundant Internal Blocks.", 1,
-            numReplicas.redundantInternalBlocks());
+    assertEquals(1, numReplicas.redundantInternalBlocks(),
+        "Choose the source node for reconstruction with one node reach"
+            + " the MAX maxReplicationStreams, the numReplicas should return"
+            + " the correct redundant Internal Blocks.");
   }
 
   @Test
@@ -844,12 +840,12 @@
         numReplicas, liveBlockIndices,
         liveBusyBlockIndices,
         LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY);
-    assertEquals("There are 5 live replicas in " +
-            "[ds2, ds3, ds4, ds5, ds6] datanodes ",
-        5, numReplicas.liveReplicas());
-    assertEquals("The ds1 datanode is in decommissioning, " +
-            "so there is no redundant replica",
-        0, numReplicas.redundantInternalBlocks());
+    assertEquals(5, numReplicas.liveReplicas(),
+        "There are 5 live replicas in " +
+            "[ds2, ds3, ds4, ds5, ds6] datanodes ");
+    assertEquals(0, numReplicas.redundantInternalBlocks(),
+        "The ds1 datanode is in decommissioning, " +
+            "so there is no redundant replica");
   }
 
   @Test
@@ -867,33 +863,33 @@
     List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
     List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
 
-    assertNotNull("Chooses decommissioning source node for a normal replication"
-        + " if all available source nodes have reached their replication"
-        + " limits below the hard limit.",
-        bm.chooseSourceDatanodes(
-            bm.getStoredBlock(aBlock),
-            cntNodes,
-            liveNodes,
-            new NumberReplicas(),
-            new LinkedList<Byte>(),
-            new ArrayList<Byte>(),
-            LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY)[0]);
+    assertNotNull(
+        bm.chooseSourceDatanodes(
+            bm.getStoredBlock(aBlock),
+            cntNodes, liveNodes,
+            new NumberReplicas(),
+            new LinkedList<Byte>(),
+            new ArrayList<Byte>(),
+            LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY)[0],
+        "Chooses decommissioning source node for a normal replication"
+            + " if all available source nodes have reached their replication"
+            + " limits below the hard limit.");
 
 
     // Increase the replication count to test replication count > hard limit
     DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
     origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
 
-    assertEquals("Does not choose a source decommissioning node for a normal"
-        + " replication when all available nodes exceed the hard limit.", 0,
-        bm.chooseSourceDatanodes(
-            bm.getStoredBlock(aBlock),
-            cntNodes,
-            liveNodes,
-            new NumberReplicas(),
-            new LinkedList<Byte>(),
-            new ArrayList<Byte>(),
-            LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY).length);
+    assertEquals(0,
+        bm.chooseSourceDatanodes(
+            bm.getStoredBlock(aBlock),
+            cntNodes, liveNodes,
+            new NumberReplicas(),
+            new LinkedList<Byte>(),
+            new ArrayList<Byte>(),
+            LowRedundancyBlocks.QUEUE_LOW_REDUNDANCY).length,
+        "Does not choose a source decommissioning node for a normal"
+            + " replication when all available nodes exceed the hard limit.");
   }
 
   @Test
@@ -1133,9 +1129,9 @@
             rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
     bm.setInitializedReplQueues(true);
     bm.processIncrementalBlockReport(node, srdb);
-    // Needed replications should still be 0.
-    assertEquals("UC block was incorrectly added to needed Replications",
-        0, bm.neededReconstruction.size());
+    // Needed replications should still be 0.
+    assertEquals(0, bm.neededReconstruction.size(),
+        "UC block was incorrectly added to needed Replications");
     bm.setInitializedReplQueues(false);
   }
 
@@ -1230,11 +1226,11 @@
     BlockPlacementPolicyDefault policyDefault =
         (BlockPlacementPolicyDefault) bm.getBlockPlacementPolicy();
     excessTypes.add(StorageType.DEFAULT);
-    Assert.assertTrue(policyDefault.useDelHint(delHint, null, moreThan1Racks,
+    Assertions.assertTrue(policyDefault.useDelHint(delHint, null, moreThan1Racks,
         null, excessTypes));
     excessTypes.remove(0);
     excessTypes.add(StorageType.SSD);
-    Assert.assertFalse(policyDefault.useDelHint(delHint, null, moreThan1Racks,
+    Assertions.assertFalse(policyDefault.useDelHint(delHint, null, moreThan1Racks,
         null, excessTypes));
   }
 
@@ -1422,7 +1418,7 @@
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = cluster.getInstanceStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("Data directory does not exist", dataDir.exists());
+      assertTrue(dataDir.exists(), "Data directory does not exist");
       BlockInfo blockInfo =
           blockManager.blocksMap.getBlocks().iterator().next();
       ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(),
@@ -1474,9 +1470,9 @@
       LocatedBlocks locatedBlocks =
           blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
               false, false, null, null);
-      assertTrue("Located Blocks should exclude corrupt" +
-              "replicas and failed storages",
-          locatedBlocks.getLocatedBlocks().size() == 1);
+      assertTrue(locatedBlocks.getLocatedBlocks().size() == 1,
+          "Located Blocks should exclude corrupt " +
+              "replicas and failed storages");
       ns.readUnlock();
     } finally {
       if (cluster != null) {
@@ -1506,18 +1502,18 @@
           break;
         }
       }
-      assertTrue("Unexpected text in metasave," +
-              "was expecting corrupt blocks section!", foundIt);
+      assertTrue(foundIt, "Unexpected text in metasave, " +
+          "was expecting corrupt blocks section!");
       corruptBlocksLine = reader.readLine();
       String regex = "Block=blk_[0-9]+_[0-9]+\\tSize=.*\\tNode=.*" +
           "\\tStorageID=.*\\tStorageState.*" +
           "\\tTotalReplicas=.*\\tReason=GENSTAMP_MISMATCH";
-      assertTrue("Unexpected corrupt block section in metasave!",
-          corruptBlocksLine.matches(regex));
+      assertTrue(corruptBlocksLine.matches(regex),
+          "Unexpected corrupt block section in metasave!");
       corruptBlocksLine = reader.readLine();
       regex = "Metasave: Number of datanodes.*";
-      assertTrue("Unexpected corrupt block section in metasave!",
-          corruptBlocksLine.matches(regex));
+      assertTrue(corruptBlocksLine.matches(regex),
+          "Unexpected corrupt block section in metasave!");
     } finally {
       if (reader != null)
         reader.close();
@@ -1639,13 +1635,13 @@
           + ", DataNode: " + dn.getDatanodeDescriptor().getXferAddr());
     }
     if (isBlockPlacementSatisfied) {
-      assertTrue("Block group of " + file + "should be placement" +
-              " policy satisfied, currently!",
-          blockManager.isPlacementPolicySatisfied(blockInfo));
+      assertTrue(blockManager.isPlacementPolicySatisfied(blockInfo),
+          "Block group of " + file + " should be placement" +
+              " policy satisfied, currently!");
     } else {
-      assertFalse("Block group of " + file + " should be placement" +
-              " policy unsatisfied, currently!",
-          blockManager.isPlacementPolicySatisfied(blockInfo));
+      assertFalse(blockManager.isPlacementPolicySatisfied(blockInfo),
+          "Block group of " + file + " should be placement" +
+              " policy unsatisfied, currently!");
     }
   }
 
@@ -1675,8 +1671,8 @@
         buffer.append(line);
       }
       String output = buffer.toString();
-      assertTrue("Metasave output should not have null block ",
-          output.contains("Block blk_0_0 is Null"));
+      assertTrue(output.contains("Block blk_0_0 is Null"),
+          "Metasave output should not have null block ");
 
     } finally {
       reader.close();
@@ -1703,14 +1699,14 @@
         buffer.append(line);
       }
       String output = buffer.toString();
-      assertTrue("Metasave output should have reported missing blocks.",
-          output.contains("Metasave: Blocks currently missing: 1"));
-      assertTrue("There should be 0 blocks waiting for reconstruction",
-          output.contains("Metasave: Blocks waiting for reconstruction: 0"));
+      assertTrue(output.contains("Metasave: Blocks currently missing: 1"),
+          "Metasave output should have reported missing blocks.");
+      assertTrue(output.contains("Metasave: Blocks waiting for reconstruction: 0"),
+          "There should be 0 blocks waiting for reconstruction");
       String blockNameGS = block.getBlockName() + "_" +
           block.getGenerationStamp();
-      assertTrue("Block " + blockNameGS + " should be MISSING.",
-          output.contains(blockNameGS + " MISSING"));
+      assertTrue(output.contains(blockNameGS + " MISSING"),
+          "Block " + blockNameGS + " should be MISSING.");
     } finally {
       reader.close();
       file.delete();
@@ -1776,18 +1772,18 @@
         System.out.println(line);
       }
       String output = buffer.toString();
-      assertTrue("Metasave output should not have reported " +
-              "missing blocks.",
-          output.contains("Metasave: Blocks currently missing: 0"));
-      assertTrue("There should be 1 block waiting for reconstruction",
-          output.contains("Metasave: Blocks waiting for reconstruction: 1"));
+      assertTrue(output.contains("Metasave: Blocks currently missing: 0"),
+          "Metasave output should not have reported missing blocks.");
+      assertTrue(output.contains(
+          "Metasave: Blocks waiting for reconstruction: 1"),
+          "There should be 1 block waiting for reconstruction");
       String blockNameGS = block.getBlockName() + "_" +
           block.getGenerationStamp();
-      assertTrue("Block " + blockNameGS +
-              " should be list as maintenance.",
-          output.contains(blockNameGS + " (replicas: live: 1 decommissioning " +
-              "and decommissioned: 0 corrupt: 0 in excess: " +
-              "0 maintenance mode: 1)"));
+      assertTrue(output.contains(blockNameGS +
+              " (replicas: live: 1 decommissioning " +
+              "and decommissioned: 0 corrupt: 0 in excess: " +
+              "0 maintenance mode: 1)"),
+          "Block " + blockNameGS + " should be listed as maintenance.");
     } finally {
       reader.close();
       file.delete();
@@ -1831,18 +1827,18 @@
         buffer.append(line);
       }
       String output = buffer.toString();
-      assertTrue("Metasave output should not have reported " +
-              "missing blocks.",
-          output.contains("Metasave: Blocks currently missing: 0"));
-      assertTrue("There should be 1 block waiting for reconstruction",
-          output.contains("Metasave: Blocks waiting for reconstruction: 1"));
+      assertTrue(output.contains("Metasave: Blocks currently missing: 0"),
+          "Metasave output should not have reported missing blocks.");
+      assertTrue(output.contains(
+          "Metasave: Blocks waiting for reconstruction: 1"),
+          "There should be 1 block waiting for reconstruction");
       String blockNameGS = block.getBlockName() + "_" +
           block.getGenerationStamp();
-      assertTrue("Block " + blockNameGS +
-              " should be list as maintenance.",
-          output.contains(blockNameGS + " (replicas: live: 1 decommissioning " +
-              "and decommissioned: 1 corrupt: 0 in excess: " +
-              "0 maintenance mode: 0)"));
+      assertTrue(output.contains(blockNameGS +
+              " (replicas: live: 1 decommissioning " +
+              "and decommissioned: 1 corrupt: 0 in excess: " +
+              "0 maintenance mode: 0)"),
+          "Block " + blockNameGS + " should be listed as maintenance.");
     } finally {
       reader.close();
       file.delete();
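
The assertion rewrites above all follow the JUnit 5 parameter order: the failure message moves from the first argument to the last, while the expected/actual values keep their positions. A minimal sketch of the shape being applied (the class name and values are illustrative only):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    public class MessageLastExample {
      @Test
      public void messageIsNowTheLastArgument() {
        int expectedReplicas = 3;
        int reportedReplicas = 1 + 2;
        // JUnit 4: assertEquals("replica count", expectedReplicas, reportedReplicas);
        // JUnit 5 takes the message as the trailing parameter:
        assertEquals(expectedReplicas, reportedReplicas, "replica count");
      }
    }
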
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index 5d2a07e..b2989ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -30,18 +30,15 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -81,7 +78,7 @@
    *
    * @throws IOException
    */
-  @Before
+  @BeforeEach
   public void setupMockCluster() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setDouble(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
@@ -115,8 +112,8 @@
    */
   @Test(timeout = 30000)
   public void testInitialize() {
-    assertFalse("Block manager should not be in safe mode at beginning.",
-        bmSafeMode.isInSafeMode());
+    assertFalse(bmSafeMode.isInSafeMode(),
+        "Block manager should not be in safe mode at beginning.");
     bmSafeMode.activate(BLOCK_TOTAL);
     assertEquals(BMSafeModeStatus.PENDING_THRESHOLD, getSafeModeStatus());
     assertTrue(bmSafeMode.isInSafeMode());
@@ -485,14 +482,14 @@
     // PENDING_THRESHOLD -> EXTENSION
     bmSafeMode.checkSafeMode();
 
-    assertFalse("Shouldn't leave safe mode in case of blocks with future GS! ",
-        bmSafeMode.leaveSafeMode(false));
-    assertTrue("Leaving safe mode forcefully should succeed regardless of " +
-        "blocks with future GS.", bmSafeMode.leaveSafeMode(true));
-    assertEquals("Number of blocks with future GS should have been cleared " +
-        "after leaving safe mode", 0L, bmSafeMode.getBytesInFuture());
-    assertTrue("Leaving safe mode should succeed after blocks with future GS " +
-        "are cleared.", bmSafeMode.leaveSafeMode(false));
+    assertFalse(bmSafeMode.leaveSafeMode(false),
+        "Shouldn't leave safe mode in case of blocks with future GS! ");
+    assertTrue(bmSafeMode.leaveSafeMode(true), "Leaving safe mode forcefully"
+        + " should succeed regardless of blocks with future GS.");
+    assertEquals(0L, bmSafeMode.getBytesInFuture(), "Number of blocks with"
+        + " future GS should have been cleared after leaving safe mode");
+    assertTrue(bmSafeMode.leaveSafeMode(false), "Leaving safe mode should"
+        + " succeed after blocks with future GS are cleared.");
   }
 
   @Test(timeout = 10000)
@@ -705,16 +702,16 @@
 
   private void assertSafeModeIsLeftAtThreshold(long blockIndex) {
     if (blockIndex < BLOCK_THRESHOLD) {
-      assertEquals("Current block index should be equal to " +
-          "the safe block counter.", blockIndex, getblockSafe());
-      assertTrue("Block Manager should stay in safe mode until " +
-          "the safe block threshold is reached.", bmSafeMode.isInSafeMode());
+      assertEquals(blockIndex, getblockSafe(),
+          "Current block index should be equal to the safe block counter.");
+      assertTrue(bmSafeMode.isInSafeMode(), "Block Manager should stay in"
+          + " safe mode until the safe block threshold is reached.");
     } else {
-      assertEquals("If safe block threshold is reached, safe block " +
-          "counter should not increase further.",
-          BLOCK_THRESHOLD, getblockSafe());
-      assertFalse("Block manager leaves safe mode if block " +
-          "threshold is met.", bmSafeMode.isInSafeMode());
+      assertEquals(BLOCK_THRESHOLD, getblockSafe(),
+          "If safe block threshold is reached, safe block "
+              + "counter should not increase further.");
+      assertFalse(bmSafeMode.isInSafeMode(), "Block manager leaves safe mode"
+          + " if block threshold is met.");
     }
   }
 }
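
TestBlockManagerSafeMode still imports the JUnit 4 org.junit.Test, so its @Test(timeout = 30000) usages keep compiling; the Jupiter @Test annotation has no timeout attribute, so once the annotation itself is migrated those limits would move to org.junit.jupiter.api.Timeout. A minimal sketch, assuming a JUnit 5 version (5.5 or later) that ships @Timeout; the method body is illustrative only:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class TimeoutMigrationExample {
      @Test
      @Timeout(value = 30, unit = TimeUnit.SECONDS)  // replaces @Test(timeout = 30000)
      public void finishesWithinThirtySeconds() throws InterruptedException {
        Thread.sleep(10);
      }
    }
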
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyDebugLoggingBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyDebugLoggingBuilder.java
index 24b3660..36705b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyDebugLoggingBuilder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementPolicyDebugLoggingBuilder.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java
index 6b07334..192f366 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusDefault.java
@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.api.Test;
 
 /**
  * Unit tests to validate the BlockPlacementStatusDefault policy, focusing on
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java
index 1e0fb76..a42b99f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockPlacementStatusWithUpgradeDomain.java
@@ -17,17 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.util.HashSet;
 import java.util.Set;
 
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Unit tests for BlockPlacementStatusWithUpgradeDomain class.
@@ -38,7 +36,7 @@
   private BlockPlacementStatusDefault bpsd =
       mock(BlockPlacementStatusDefault.class);
 
-  @Before
+  @BeforeEach
   public void setup() {
     upgradeDomains = new HashSet<String>();
     upgradeDomains.add("1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
index a5acc14..f65f0c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportLease.java
@@ -35,7 +35,7 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -44,8 +44,8 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
index 9f8b2c7..ab2774b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
@@ -31,10 +31,10 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.After;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.event.Level;
 
 import java.io.IOException;
@@ -53,12 +53,12 @@
     LOG.error("Test error: " + what);
   }
 
-  @After
+  @AfterEach
   public void restoreNormalBlockManagerFaultInjector() {
     BlockManagerFaultInjector.instance = new BlockManagerFaultInjector();
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void raiseBlockManagerLogLevels() {
     GenericTestUtils.setLogLevel(BlockManager.LOG, Level.TRACE);
     GenericTestUtils.setLogLevel(BlockReportLeaseManager.LOG, Level.TRACE);
@@ -155,7 +155,7 @@
       }, 25, 50000);
     }
     cluster.shutdown();
-    Assert.assertEquals("", failure.get());
+    Assertions.assertEquals("", failure.get());
   }
 
   /**
@@ -207,9 +207,9 @@
       BlockManagerFaultInjector.instance = injector;
       cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
       cluster.get().waitActive();
-      Assert.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
+      Assertions.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
       gotFbrSem.acquire();
-      Assert.assertNull(failure.get());
+      Assertions.assertNull(failure.get());
     } finally {
       if (cluster.get() != null) {
         cluster.get().shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 076a640..dd62dba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -18,11 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -45,10 +41,10 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.junit.rules.Timeout;
 
@@ -62,7 +58,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
@@ -84,7 +80,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -219,8 +215,8 @@
     Thread.sleep(6000);
     storageTypeStatsMap = cluster.getNamesystem().getBlockManager()
         .getStorageTypeStats();
-    assertFalse("StorageTypeStatsMap should not contain DISK Storage type",
-        storageTypeStatsMap.containsKey(StorageType.DISK));
+    assertFalse(storageTypeStatsMap.containsKey(StorageType.DISK),
+        "StorageTypeStatsMap should not contain DISK Storage type");
     DataNodeTestUtils.restoreDataDirFromFailure(dn1ArcVol1);
     DataNodeTestUtils.restoreDataDirFromFailure(dn2ArcVol1);
     DataNodeTestUtils.restoreDataDirFromFailure(dn3ArcVol1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 428e252..c1396d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -64,8 +62,8 @@
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 public class TestBlockTokenWithDFS {
@@ -105,7 +103,7 @@
     } catch (IOException e) {
       return false;
     }
-    assertEquals("Cannot read file.", toRead.length, totalRead);
+    assertEquals(toRead.length, totalRead, "Cannot read file.");
     return checkFile(toRead, expected);
   }
 
@@ -113,8 +111,8 @@
   private boolean checkFile2(FSDataInputStream in, byte[] expected) {
     byte[] toRead = new byte[expected.length];
     try {
-      assertEquals("Cannot read file", toRead.length, in.read(0, toRead, 0,
-          toRead.length));
+      assertEquals(toRead.length, in.read(0, toRead, 0, toRead.length),
+          "Cannot read file");
     } catch (IOException e) {
       return false;
     }
@@ -198,14 +196,14 @@
       }
     }
     if (shouldSucceed) {
-      Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
-            + "when it is expected to be valid", blockReader);
+      Assertions.assertNotNull(blockReader, "OP_READ_BLOCK: access token is"
+          + " invalid, when it is expected to be valid");
     } else {
-      Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
-          + "when it is expected to be invalid", ioe);
-      Assert.assertTrue(
-          "OP_READ_BLOCK failed due to reasons other than access token: ",
-          ioe instanceof InvalidBlockTokenException);
+      Assertions.assertNotNull(ioe, "OP_READ_BLOCK: access token is valid, "
+          + "when it is expected to be invalid");
+      Assertions.assertTrue(
+          ioe instanceof InvalidBlockTokenException,
+          "OP_READ_BLOCK failed due to reasons other than access token: ");
     }
   }
 
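
The converted check in TestBlockTokenWithDFS keeps the instanceof-inside-assertTrue shape from JUnit 4. If the Jupiter version on the test classpath is 5.8 or newer, assertInstanceOf reports the expected and actual types on failure rather than only the message; a minimal sketch under that assumption, with an illustrative exception and message:

    import static org.junit.jupiter.api.Assertions.assertInstanceOf;

    import java.io.FileNotFoundException;
    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    public class InstanceOfAssertionExample {
      @Test
      public void failureNamesTheActualType() {
        IOException ioe = new FileNotFoundException("missing block file");
        // Equivalent to assertTrue(ioe instanceof FileNotFoundException, ...),
        // but a failure also prints the actual runtime type of ioe.
        assertInstanceOf(FileNotFoundException.class, ioe,
            "read should have failed on the missing block file");
      }
    }
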
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
index 124db17..78d6f4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockUnderConstructionFeature.java
index 15502c9..0957bca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockUnderConstructionFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockUnderConstructionFeature.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class provides tests for {@link BlockUnderConstructionFeature} class
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 7dfb951..ac35fcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -41,10 +41,10 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestBlocksWithNotEnoughRacks {
   public static final Logger LOG =
@@ -298,7 +298,7 @@
       for (int i = 0; i < racks.length; i++) {
         byte[] blockContent = cluster.readBlockOnDataNodeAsBytes(i, b);
         if (blockContent != null && i != dnToCorrupt) {
-          assertArrayEquals("Corrupt replica", fileContent, blockContent);
+          assertArrayEquals(fileContent, blockContent, "Corrupt replica");
         }
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
index eda60bd..f46df73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
@@ -27,8 +27,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 public class TestCachedBlocksList {
   public static final Logger LOG =
@@ -43,52 +43,52 @@
           new CachedBlock(1L, (short)1, true),
           new CachedBlock(2L, (short)1, true),
       };
-    // check that lists are empty
-    Assert.assertTrue("expected pending cached list to start off empty.", 
-        !dn.getPendingCached().iterator().hasNext());
-    Assert.assertTrue("expected cached list to start off empty.", 
-        !dn.getCached().iterator().hasNext());
-    Assert.assertTrue("expected pending uncached list to start off empty.", 
-        !dn.getPendingUncached().iterator().hasNext());
+    // check that lists are empty
+    Assertions.assertTrue(!dn.getPendingCached().iterator().hasNext(),
+        "expected pending cached list to start off empty.");
+    Assertions.assertTrue(!dn.getCached().iterator().hasNext(),
+        "expected cached list to start off empty.");
+    Assertions.assertTrue(!dn.getPendingUncached().iterator().hasNext(),
+        "expected pending uncached list to start off empty.");
     // add a block to the back
-    Assert.assertTrue(dn.getCached().add(blocks[0]));
-    Assert.assertTrue("expected pending cached list to still be empty.", 
-        !dn.getPendingCached().iterator().hasNext());
-    Assert.assertEquals("failed to insert blocks[0]", blocks[0],
-        dn.getCached().iterator().next());
-    Assert.assertTrue("expected pending uncached list to still be empty.", 
-        !dn.getPendingUncached().iterator().hasNext());
+    Assertions.assertTrue(dn.getCached().add(blocks[0]));
+    Assertions.assertTrue(!dn.getPendingCached().iterator().hasNext(),
+        "expected pending cached list to still be empty.");
+    Assertions.assertEquals(blocks[0], dn.getCached().iterator().next(),
+        "failed to insert blocks[0]");
+    Assertions.assertTrue(!dn.getPendingUncached().iterator().hasNext(),
+        "expected pending uncached list to still be empty.");
     // add another block to the back
-    Assert.assertTrue(dn.getCached().add(blocks[1]));
+    Assertions.assertTrue(dn.getCached().add(blocks[1]));
     Iterator<CachedBlock> iter = dn.getCached().iterator();
-    Assert.assertEquals(blocks[0], iter.next());
-    Assert.assertEquals(blocks[1], iter.next());
-    Assert.assertTrue(!iter.hasNext());
+    Assertions.assertEquals(blocks[0], iter.next());
+    Assertions.assertEquals(blocks[1], iter.next());
+    Assertions.assertTrue(!iter.hasNext());
     // add a block to the front
-    Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
+    Assertions.assertTrue(dn.getCached().addFirst(blocks[2]));
     iter = dn.getCached().iterator();
-    Assert.assertEquals(blocks[2], iter.next());
-    Assert.assertEquals(blocks[0], iter.next());
-    Assert.assertEquals(blocks[1], iter.next());
-    Assert.assertTrue(!iter.hasNext());
+    Assertions.assertEquals(blocks[2], iter.next());
+    Assertions.assertEquals(blocks[0], iter.next());
+    Assertions.assertEquals(blocks[1], iter.next());
+    Assertions.assertTrue(!iter.hasNext());
     // remove a block from the middle
-    Assert.assertTrue(dn.getCached().remove(blocks[0]));
+    Assertions.assertTrue(dn.getCached().remove(blocks[0]));
     iter = dn.getCached().iterator();
-    Assert.assertEquals(blocks[2], iter.next());
-    Assert.assertEquals(blocks[1], iter.next());
-    Assert.assertTrue(!iter.hasNext());
+    Assertions.assertEquals(blocks[2], iter.next());
+    Assertions.assertEquals(blocks[1], iter.next());
+    Assertions.assertTrue(!iter.hasNext());
     // remove all blocks
     dn.getCached().clear();
-    Assert.assertTrue("expected cached list to be empty after clear.", 
-        !dn.getPendingCached().iterator().hasNext());
+    Assertions.assertTrue(!dn.getPendingCached().iterator().hasNext(),
+        "expected cached list to be empty after clear.");
   }
 
   private void testAddElementsToList(CachedBlocksList list,
       CachedBlock[] blocks) {
-    Assert.assertTrue("expected list to start off empty.", 
-        !list.iterator().hasNext());
+    Assertions.assertTrue(!list.iterator().hasNext(),
+        "expected list to start off empty.");
     for (CachedBlock block : blocks) {
-      Assert.assertTrue(list.add(block));
+      Assertions.assertTrue(list.add(block));
     }
   }
 
@@ -96,7 +96,7 @@
       CachedBlocksList list, CachedBlock[] blocks) {
     int i = 0;
     for (Iterator<CachedBlock> iter = list.iterator(); iter.hasNext(); ) {
-      Assert.assertEquals(blocks[i], iter.next());
+      Assertions.assertEquals(blocks[i], iter.next());
       i++;
     }
     if (r.nextBoolean()) {
@@ -111,14 +111,14 @@
       for (int removed = 0; removed < remainingBlocks.length; ) {
         int toRemove = r.nextInt(remainingBlocks.length);
         if (remainingBlocks[toRemove] != null) {
-          Assert.assertTrue(list.remove(remainingBlocks[toRemove]));
+          Assertions.assertTrue(list.remove(remainingBlocks[toRemove]));
           remainingBlocks[toRemove] = null;
           removed++;
         }
       }
     }
-    Assert.assertTrue("expected list to be empty after everything " +
-        "was removed.", !list.iterator().hasNext());
+    Assertions.assertTrue(!list.iterator().hasNext(),
+        "expected list to be empty after everything was removed.");
   }
 
   @Test(timeout=60000)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index d7920a7..b3916e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -20,7 +20,7 @@
 import java.util.Random;
 import java.util.UUID;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -43,9 +43,9 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test if FSNamesystem handles heartbeat right
@@ -64,7 +64,7 @@
   private int totalBlockGroups, blockGroupSize, stripesPerBlock, cellSize;
   private LocatedStripedBlock locatedStripedBlock;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     ecPolicy = SystemErasureCodingPolicies.getByID(
         SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
@@ -99,7 +99,7 @@
     locatedStripedBlock = (LocatedStripedBlock)(lbs.get(0));
   }
 
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -262,13 +262,13 @@
       invalidateBlocks = (InvalidateBlocks) Whitebox
           .getInternalState(cluster.getNamesystem().getBlockManager(),
               "invalidateBlocks");
-      assertEquals("Invalidate blocks should include both Replicas and " +
-          "Striped BlockGroups!",
-          (long) expected, invalidateBlocks.numBlocks());
-      assertEquals("Unexpected invalidate count for replicas!",
-          totalReplicas, invalidateBlocks.getBlocks());
-      assertEquals("Unexpected invalidate count for striped block groups!",
-          totalStripedDataBlocks, invalidateBlocks.getECBlocks());
+      assertEquals((long) expected, invalidateBlocks.numBlocks(),
+          "Invalidate blocks should include both Replicas and "
+              + "Striped BlockGroups!");
+      assertEquals(totalReplicas, invalidateBlocks.getBlocks(),
+          "Unexpected invalidate count for replicas!");
+      assertEquals(totalStripedDataBlocks, invalidateBlocks.getECBlocks(),
+          "Unexpected invalidate count for striped block groups!");
     } finally {
       namesystem.writeUnlock();
     }
@@ -286,8 +286,8 @@
       try {
         bm.getDatanodeManager().registerDatanode(reg);
         expected -= (totalReplicasPerDataNode + totalBlockGroupsPerDataNode);
-        assertEquals("Expected number of invalidate blocks to decrease",
-            (long) expected, invalidateBlocks.numBlocks());
+        assertEquals((long) expected, invalidateBlocks.numBlocks(),
+            "Expected number of invalidate blocks to decrease");
       } finally {
           namesystem.writeUnlock();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index c107c73..96a5ccc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.when;
 
@@ -36,7 +33,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 
@@ -83,13 +80,13 @@
       long expectedReplicaCount, long expectedStripedBlockCount) {
     long totalExpectedCorruptBlocks = expectedReplicaCount +
         expectedStripedBlockCount;
-    assertEquals("Unexpected total corrupt blocks count!",
-        totalExpectedCorruptBlocks, corruptReplicasMap.size());
-    assertEquals("Unexpected replica blocks count!",
-        expectedReplicaCount, corruptReplicasMap.getCorruptBlocks());
-    assertEquals("Unexpected striped blocks count!",
-        expectedStripedBlockCount,
-        corruptReplicasMap.getCorruptECBlockGroups());
+    assertEquals(totalExpectedCorruptBlocks, corruptReplicasMap.size(),
+        "Unexpected total corrupt blocks count!");
+    assertEquals(expectedReplicaCount, corruptReplicasMap.getCorruptBlocks(),
+        "Unexpected replica blocks count!");
+    assertEquals(expectedStripedBlockCount,
+        corruptReplicasMap.getCorruptECBlockGroups(),
+        "Unexpected striped blocks count!");
   }
   
   @Test
@@ -101,21 +98,21 @@
     when(bim.isStripedBlock(any(Block.class))).thenCallRealMethod();
     assertTrue(!bim.isLegacyBlock(new Block(-1)));
 
-    // Make sure initial values are returned correctly
-    assertEquals("Total number of corrupt blocks must initially be 0!",
-        0, crm.size());
-    assertEquals("Number of corrupt replicas must initially be 0!",
-        0, crm.getCorruptBlocks());
-    assertEquals("Number of corrupt striped block groups must initially be 0!",
-        0, crm.getCorruptECBlockGroups());
-    assertNull("Param n cannot be less than 0",
-        crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, -1, null));
-    assertNull("Param n cannot be greater than 100",
-        crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, 101, null));
+    // Make sure initial values are returned correctly
+    assertEquals(0, crm.size(),
+        "Total number of corrupt blocks must initially be 0!");
+    assertEquals(0, crm.getCorruptBlocks(),
+        "Number of corrupt replicas must initially be 0!");
+    assertEquals(0, crm.getCorruptECBlockGroups(),
+        "Number of corrupt striped block groups must initially be 0!");
+    assertNull(crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS, -1,
+        null), "Param n cannot be less than 0");
+    assertNull(crm.getCorruptBlockIdsForTesting(bim, BlockType.CONTIGUOUS,
+        101, null), "Param n cannot be greater than 100");
     long[] l = crm.getCorruptBlockIdsForTesting(
         bim, BlockType.CONTIGUOUS, 0, null);
-    assertNotNull("n = 0 must return non-null", l);
-    assertEquals("n = 0 must return an empty list", 0, l.length);
+    assertNotNull(l, "n = 0 must return non-null");
+    assertEquals(0, l.length, "n = 0 must return an empty list");
 
     // Create a list of block ids. A list is used to allow easy
     // validation of the output of getCorruptReplicaBlockIds.
@@ -165,25 +162,25 @@
       addToCorruptReplicasMap(crm, getStripedBlock(blockId), dn1);
     }
 
-    assertEquals("Number of corrupt blocks not returning correctly",
-        2 * blockCount, crm.size());
-    assertTrue("First five corrupt replica blocks ids are not right!",
-        Arrays.equals(Arrays.copyOfRange(replicaIds, 0, 5),
-            crm.getCorruptBlockIdsForTesting(
-                bim, BlockType.CONTIGUOUS, 5, null)));
-    assertTrue("First five corrupt striped blocks ids are not right!",
-        Arrays.equals(Arrays.copyOfRange(stripedIds, 0, 5),
-            crm.getCorruptBlockIdsForTesting(
-                bim, BlockType.STRIPED, 5, null)));
+    assertEquals(2 * blockCount, crm.size(),
+        "Number of corrupt blocks not returning correctly");
+    assertTrue(Arrays.equals(Arrays.copyOfRange(replicaIds, 0, 5),
+        crm.getCorruptBlockIdsForTesting(
+            bim, BlockType.CONTIGUOUS, 5, null)),
+        "First five corrupt replica blocks ids are not right!");
+    assertTrue(Arrays.equals(Arrays.copyOfRange(stripedIds, 0, 5),
+        crm.getCorruptBlockIdsForTesting(
+            bim, BlockType.STRIPED, 5, null)),
+        "First five corrupt striped blocks ids are not right!");
 
-    assertTrue("10 replica blocks after 7 not returned correctly!",
-        Arrays.equals(Arrays.copyOfRange(replicaIds, 7, 17),
-            crm.getCorruptBlockIdsForTesting(
-                bim, BlockType.CONTIGUOUS, 10, 7L)));
-    assertTrue("10 striped blocks after 7 not returned correctly!",
-        Arrays.equals(Arrays.copyOfRange(stripedIds, 7, 17),
-            crm.getCorruptBlockIdsForTesting(bim, BlockType.STRIPED,
-                10, getStripedBlock(7).getBlockId())));
+    assertTrue(Arrays.equals(Arrays.copyOfRange(replicaIds, 7, 17),
+        crm.getCorruptBlockIdsForTesting(
+            bim, BlockType.CONTIGUOUS, 10, 7L)),
+        "10 replica blocks after 7 not returned correctly!");
+    assertTrue(Arrays.equals(Arrays.copyOfRange(stripedIds, 7, 17),
+        crm.getCorruptBlockIdsForTesting(bim, BlockType.STRIPED,
+            10, getStripedBlock(7).getBlockId())),
+        "10 striped blocks after 7 not returned correctly!");
   }
   
   private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptionWithFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptionWithFailover.java
index 06e4f60..4ef74b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptionWithFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptionWithFailover.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
index eefc1d5..0ea6637 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 
@@ -28,7 +25,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests that methods in DatanodeDescriptor
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 5f5452a..ec40c94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -68,12 +68,13 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestDatanodeManager {
   
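
TestDatanodeManager switches assertThat to org.hamcrest.MatcherAssert because JUnit Jupiter does not provide an assertThat of its own (the JUnit 4 variant is deprecated and only delegates to Hamcrest). The matcher syntax itself is unchanged; a minimal sketch, assuming Hamcrest is already on the test classpath as it is for these modules, with illustrative names:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.core.Is.is;

    import org.junit.jupiter.api.Test;

    public class HamcrestAssertThatExample {
      @Test
      public void matcherSyntaxIsUnchanged() {
        int liveNodes = 2 + 1;
        // Only the assertThat import changes; the matcher stays the same.
        assertThat(liveNodes, is(3));
      }
    }
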
@@ -133,9 +134,9 @@
 
     //Verify DatanodeManager has the correct count
     Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
-    assertNull("should be no more version0 nodes", mapToCheck.get("version0"));
-    assertEquals("should be one version1 node",
-        mapToCheck.get("version1").intValue(), 1);
+    assertNull(mapToCheck.get("version0"), "should be no more version0 nodes");
+    assertEquals(mapToCheck.get("version1").intValue(), 1,
+        "should be one version1 node");
   }
 
   /**
@@ -237,9 +238,9 @@
         LOG.info("Still in map: " + entry.getKey() + " has "
           + entry.getValue());
       }
-      assertEquals("The map of version counts returned by DatanodeManager was"
-        + " not what it was expected to be on iteration " + i, 0,
-        mapToCheck.size());
+      assertEquals(0, mapToCheck.size(),
+          "The map of version counts returned by DatanodeManager was"
+              + " not what it was expected to be on iteration " + i);
     }
   }
   
@@ -272,12 +273,12 @@
     try {
       //Register this node
       dm.registerDatanode(dr);
-      Assert.fail("Expected an UnresolvedTopologyException");
+      Assertions.fail("Expected an UnresolvedTopologyException");
     } catch (UnresolvedTopologyException ute) {
       LOG.info("Expected - topology is not resolved and " +
           "registration is rejected.");
     } catch (Exception e) {
-      Assert.fail("Expected an UnresolvedTopologyException");
+      Assertions.fail("Expected an UnresolvedTopologyException");
     }
   }
   
@@ -893,12 +894,12 @@
     // Sort the list so that we know which one is which
     Collections.sort(both);
 
-    Assert.assertEquals("Incorrect number of hosts reported",
-        2, both.size());
-    Assert.assertEquals("Unexpected host or host in unexpected position",
-        "127.0.0.1:12345", both.get(0).getInfoAddr());
-    Assert.assertEquals("Unexpected host or host in unexpected position",
-        "127.0.0.1:23456", both.get(1).getInfoAddr());
+    Assertions.assertEquals(2, both.size(),
+        "Incorrect number of hosts reported");
+    Assertions.assertEquals("127.0.0.1:12345", both.get(0).getInfoAddr(),
+        "Unexpected host or host in unexpected position");
+    Assertions.assertEquals("127.0.0.1:23456", both.get(1).getInfoAddr(),
+        "Unexpected host or host in unexpected position");
 
     // Remove one node from includes, but do not add it to excludes.
     hm.refresh(oneNode, noNodes);
@@ -907,10 +908,10 @@
     List<DatanodeDescriptor> onlyOne =
         dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
 
-    Assert.assertEquals("Incorrect number of hosts reported",
-        1, onlyOne.size());
-    Assert.assertEquals("Unexpected host reported",
-        "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
+    Assertions.assertEquals(1, onlyOne.size(),
+        "Incorrect number of hosts reported");
+    Assertions.assertEquals("127.0.0.1:23456", onlyOne.get(0).getInfoAddr(),
+        "Unexpected host reported");
 
     // Remove all nodes from includes
     hm.refresh(noNodes, noNodes);
@@ -922,12 +923,12 @@
     // Sort the list so that we know which one is which
     Collections.sort(bothAgain);
 
-    Assert.assertEquals("Incorrect number of hosts reported",
-        2, bothAgain.size());
-    Assert.assertEquals("Unexpected host or host in unexpected position",
-        "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
-    Assert.assertEquals("Unexpected host or host in unexpected position",
-        "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
+    Assertions.assertEquals(2, bothAgain.size(),
+        "Incorrect number of hosts reported");
+    Assertions.assertEquals("127.0.0.1:12345", bothAgain.get(0).getInfoAddr(),
+        "Unexpected host or host in unexpected position");
+    Assertions.assertEquals("127.0.0.1:23456", bothAgain.get(1).getInfoAddr(),
+        "Unexpected host or host in unexpected position");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java
index cb1ef2b..99030e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestErasureCodingCorruption.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index f12f6f5..0ced726 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 
@@ -43,7 +41,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
index b4d89d7..18dad64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
@@ -18,20 +18,17 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestHost2NodesMap {
   private final Host2NodesMap map = new Host2NodesMap();
   private DatanodeDescriptor dataNodes[];
   
-  @Before
+  @BeforeEach
   public void setup() {
     dataNodes = new DatanodeDescriptor[] {
         DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
index 38d0905..18eab4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
@@ -23,8 +23,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -44,57 +44,57 @@
     // address + port combo.
     s.add(entry("127.0.0.1:12345"));
     s.add(entry("localhost:12345"));
-    Assert.assertEquals(1, s.size());
+    Assertions.assertEquals(1, s.size());
     s.add(entry("127.0.0.1:12345"));
-    Assert.assertEquals(1, s.size());
+    Assertions.assertEquals(1, s.size());
 
     // The following entries should not be de-duped.
     s.add(entry("127.0.0.1:12346"));
-    Assert.assertEquals(2, s.size());
+    Assertions.assertEquals(2, s.size());
     s.add(entry("127.0.0.1"));
-    Assert.assertEquals(3, s.size());
+    Assertions.assertEquals(3, s.size());
     s.add(entry("127.0.0.10"));
-    Assert.assertEquals(4, s.size());
+    Assertions.assertEquals(4, s.size());
   }
 
   @Test
   public void testRelation() {
     HostSet s = new HostSet();
     s.add(entry("127.0.0.1:123"));
-    Assert.assertTrue(s.match(entry("127.0.0.1:123")));
-    Assert.assertFalse(s.match(entry("127.0.0.1:12")));
-    Assert.assertFalse(s.match(entry("127.0.0.1")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.1")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
-    Assert.assertFalse(s.match(entry("127.0.0.2")));
-    Assert.assertFalse(s.match(entry("127.0.0.2:123")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.2")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.2:123")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1:123")));
+    Assertions.assertFalse(s.match(entry("127.0.0.1:12")));
+    Assertions.assertFalse(s.match(entry("127.0.0.1")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.1")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
+    Assertions.assertFalse(s.match(entry("127.0.0.2")));
+    Assertions.assertFalse(s.match(entry("127.0.0.2:123")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.2")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.2:123")));
 
     s.add(entry("127.0.0.1"));
-    Assert.assertTrue(s.match(entry("127.0.0.1:123")));
-    Assert.assertTrue(s.match(entry("127.0.0.1:12")));
-    Assert.assertTrue(s.match(entry("127.0.0.1")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.1")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
-    Assert.assertFalse(s.match(entry("127.0.0.2")));
-    Assert.assertFalse(s.match(entry("127.0.0.2:123")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.2")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.2:123")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1:123")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1:12")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.1")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
+    Assertions.assertFalse(s.match(entry("127.0.0.2")));
+    Assertions.assertFalse(s.match(entry("127.0.0.2:123")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.2")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.2:123")));
 
     s.add(entry("127.0.0.2:123"));
-    Assert.assertTrue(s.match(entry("127.0.0.1:123")));
-    Assert.assertTrue(s.match(entry("127.0.0.1:12")));
-    Assert.assertTrue(s.match(entry("127.0.0.1")));
-    Assert.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.1")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
-    Assert.assertFalse(s.match(entry("127.0.0.2")));
-    Assert.assertTrue(s.match(entry("127.0.0.2:123")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.2")));
-    Assert.assertTrue(s.matchedBy(entry("127.0.0.2:123")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1:123")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1:12")));
+    Assertions.assertTrue(s.match(entry("127.0.0.1")));
+    Assertions.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.1")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
+    Assertions.assertFalse(s.match(entry("127.0.0.2")));
+    Assertions.assertTrue(s.match(entry("127.0.0.2:123")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.2")));
+    Assertions.assertTrue(s.matchedBy(entry("127.0.0.2:123")));
   }
 
   @Test
@@ -115,8 +115,8 @@
     excludedNodes.add(entry("127.0.0.1:12346"));
     excludedNodes.add(entry("127.0.30.1:12346"));
 
-    Assert.assertEquals(2, includedNodes.size());
-    Assert.assertEquals(2, excludedNodes.size());
+    Assertions.assertEquals(2, includedNodes.size());
+    Assertions.assertEquals(2, excludedNodes.size());
 
     hm.refresh(includedNodes, excludedNodes);
 
@@ -127,30 +127,30 @@
 
     // After the de-duplication, there should be only one DN from the included
     // nodes declared as dead.
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.ALL).size());
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
             "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
             "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
             ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
     DFSTestUtil.setDatanodeDead(spam);
     includedNodes.add(entry("127.0.0.3:12345"));
     dnMap.put("uuid-spam", spam);
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     dnMap.remove("uuid-spam");
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     excludedNodes.add(entry("127.0.0.3"));
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
+    Assertions.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index e33e24f..4b9e330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -26,14 +26,11 @@
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test {@link LowRedundancyBlocks}.
@@ -76,31 +73,31 @@
       int corruptReplicationOneCount, int lowRedundancyStripedCount,
       int corruptStripedCount, int highestPriorityReplicatedBlockCount,
       int highestPriorityECBlockCount) {
-    assertEquals("Low redundancy replica count incorrect!",
-        lowRedundancyReplicaCount, queues.getLowRedundancyBlocks());
-    assertEquals("Corrupt replica count incorrect!",
-        corruptReplicaCount, queues.getCorruptBlocks());
-    assertEquals("Corrupt replica one count incorrect!",
-        corruptReplicationOneCount,
-        queues.getCorruptReplicationOneBlocks());
-    assertEquals("Low redundancy striped blocks count incorrect!",
-        lowRedundancyStripedCount, queues.getLowRedundancyECBlockGroups());
-    assertEquals("Corrupt striped blocks count incorrect!",
-        corruptStripedCount, queues.getCorruptECBlockGroups());
-    assertEquals("Low Redundancy count incorrect!",
-        lowRedundancyReplicaCount + lowRedundancyStripedCount,
-        queues.getLowRedundancyBlockCount());
-    assertEquals("LowRedundancyBlocks queue size incorrect!",
-        (lowRedundancyReplicaCount + corruptReplicaCount +
-        lowRedundancyStripedCount + corruptStripedCount), queues.size());
-    assertEquals("Highest priority replicated low redundancy " +
-            "blocks count is incorrect!",
-        highestPriorityReplicatedBlockCount,
-        queues.getHighestPriorityReplicatedBlockCount());
-    assertEquals("Highest priority erasure coded low redundancy " +
-            "blocks count is incorrect!",
-        highestPriorityECBlockCount,
-        queues.getHighestPriorityECBlockCount());
+    assertEquals(lowRedundancyReplicaCount, queues.getLowRedundancyBlocks(),
+        "Low redundancy replica count incorrect!");
+    assertEquals(corruptReplicaCount, queues.getCorruptBlocks(),
+        "Corrupt replica count incorrect!");
+    assertEquals(corruptReplicationOneCount,
+        queues.getCorruptReplicationOneBlocks(),
+        "Corrupt replica one count incorrect!");
+    assertEquals(lowRedundancyStripedCount,
+        queues.getLowRedundancyECBlockGroups(),
+        "Low redundancy striped blocks count incorrect!");
+    assertEquals(corruptStripedCount, queues.getCorruptECBlockGroups(),
+        "Corrupt striped blocks count incorrect!");
+    assertEquals(lowRedundancyReplicaCount + lowRedundancyStripedCount,
+        queues.getLowRedundancyBlockCount(), "Low Redundancy count incorrect!");
+    assertEquals((lowRedundancyReplicaCount + corruptReplicaCount +
+        lowRedundancyStripedCount + corruptStripedCount), queues.size(),
+        "LowRedundancyBlocks queue size incorrect!");
+    assertEquals(highestPriorityReplicatedBlockCount,
+        queues.getHighestPriorityReplicatedBlockCount(),
+        "Highest priority replicated low redundancy " +
+            "blocks count is incorrect!");
+    assertEquals(highestPriorityECBlockCount,
+        queues.getHighestPriorityECBlockCount(),
+        "Highest priority erasure coded low redundancy " +
+            "blocks count is incorrect!");
   }
 
   /**
@@ -292,11 +289,11 @@
                            int curReplicas,
                            int decommissionedReplicas,
                            int expectedReplicas) {
-    assertTrue("Failed to add " + block,
-               queues.add(block,
-                          curReplicas, 0,
-                          decommissionedReplicas,
-                          expectedReplicas));
+    assertTrue(queues.add(block,
+        curReplicas, 0,
+        decommissionedReplicas,
+        expectedReplicas),
+        "Failed to add " + block);
   }
 
   /**
@@ -329,7 +326,7 @@
     neededReconstruction.add(block, 2, 0, 1, 3);
     neededReconstruction.add(block, 0, 0, 0, 3);
     neededReconstruction.remove(block, LowRedundancyBlocks.LEVEL);
-    assertFalse("Should not contain the block.",
-        neededReconstruction.contains(block));
+    assertFalse(neededReconstruction.contains(block),
+        "Should not contain the block.");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index dea893b..4ee9748 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -45,8 +45,8 @@
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.event.Level;
 
 import java.io.BufferedReader;
@@ -60,12 +60,9 @@
 import java.util.Iterator;
 import java.util.UUID;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 public class TestNameNodePrunesMissingStorages {
@@ -207,7 +204,7 @@
       int datanodeToRemoveStorageFromIdx = 0;
       while (true) {
         if (datanodeToRemoveStorageFromIdx >= cluster.getDataNodes().size()) {
-          Assert.fail("failed to find datanode with uuid " + datanodeUuid);
+          Assertions.fail("failed to find datanode with uuid " + datanodeUuid);
           datanodeToRemoveStorageFrom = null;
           break;
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
index d915b6e..6373619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.concurrent.TimeoutException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index d5f2fb9..5aef81f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestOverReplicatedBlocks {
   /** Test processOverReplicatedBlock can handle corrupt replicas fine.
@@ -75,8 +75,8 @@
           "scanner.cursor");
       //wait for one minute for deletion to succeed;
       for(int i = 0; !scanCursor.delete(); i++) {
-        assertTrue("Could not delete " + scanCursor.getAbsolutePath() +
-            " in one minute", i < 60);
+        assertTrue(i < 60, "Could not delete " + scanCursor.getAbsolutePath() +
+            " in one minute");
         try {
           Thread.sleep(1000);
         } catch (InterruptedException ignored) {}
@@ -183,12 +183,12 @@
       // And should not actually be deleted, because lastDN does not heartbeat.
       namesystem.readLock();
       final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
-      assertEquals("Replicas on node " + lastDNid + " should have been deleted",
-          SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
+      assertEquals(SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks,
+          "Replicas on node " + lastDNid + " should have been deleted");
       namesystem.readUnlock();
       for(BlockLocation location : locs)
-        assertEquals("Block should still have 4 replicas",
-            4, location.getNames().length);
+        assertEquals(4, location.getNames().length,
+            "Block should still have 4 replicas");
     } finally {
       if(fs != null) fs.close();
       if(cluster != null) cluster.shutdown();
@@ -215,8 +215,8 @@
       fs.setReplication(p, (short) 1);
       out.close();
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
-      assertEquals("Expected only one live replica for the block", 1, bm
-          .countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
+      assertEquals(1, bm.countNodes(bm.getStoredBlock(block.getLocalBlock()))
+          .liveReplicas(), "Expected only one live replica for the block");
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
index ebc073d..6c02a56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 import java.util.Queue;
 
@@ -36,9 +36,9 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
 
 public class TestPendingDataNodeMessages {
@@ -107,8 +107,8 @@
       // processing IBR
       int pendingIBRMsg = cluster.getNameNode(1).getNamesystem()
           .getBlockManager().getPendingDataNodeMessageCount();
-      assertEquals("All DN message should processed after tail edits", 0,
-          pendingIBRMsg);
+      assertEquals(0, pendingIBRMsg,
+          "All DN messages should be processed after tail edits");
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
index 34e7390..75c3ad5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
@@ -32,10 +32,10 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.slf4j.event.Level;
 
@@ -57,7 +57,7 @@
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -75,7 +75,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -102,23 +102,23 @@
     dfs.delete(foo, true);
 
     waitForNumPendingDeletionBlocks(REPLICATION);
-    Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
-    Assert.assertEquals(REPLICATION, cluster.getNamesystem()
+    Assertions.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
+    Assertions.assertEquals(REPLICATION, cluster.getNamesystem()
         .getPendingDeletionBlocks());
-    Assert.assertEquals(REPLICATION,
+    Assertions.assertEquals(REPLICATION,
         dfs.getPendingDeletionBlocksCount());
     Mockito.doReturn(0L).when(mockIb).getInvalidationDelay();
 
     waitForNumPendingDeletionBlocks(0);
-    Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
-    Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
-    Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
+    Assertions.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
+    Assertions.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
+    Assertions.assertEquals(0, dfs.getPendingDeletionBlocksCount());
     long nnStarted = cluster.getNamesystem().getNNStartedTimeInMillis();
     long blockDeletionStartTime = cluster.getNamesystem()
         .getBlockDeletionStartTime();
-    Assert.assertTrue(String.format(
-        "Expect blockDeletionStartTime = %d > nnStarted = %d.",
-        blockDeletionStartTime, nnStarted), blockDeletionStartTime > nnStarted);
+    Assertions.assertTrue(blockDeletionStartTime > nnStarted, String.format(
+        "Expect blockDeletionStartTime = %d > nnStarted = %d.",
+        blockDeletionStartTime, nnStarted));
 
     // test client protocol compatibility
     Method method = DFSClient.class.
@@ -130,8 +130,8 @@
     // get an out of index value
     long invalidState = (Long) method.invoke(dfs.getClient(),
         ClientProtocol.STATS_ARRAY_LENGTH);
-    Assert.assertEquals(0, validState);
-    Assert.assertEquals(-1, invalidState);
+    Assertions.assertEquals(0, validState);
+    Assertions.assertEquals(-1, invalidState);
   }
 
   /**
@@ -170,7 +170,7 @@
     Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(),
         "invalidateBlocks", mockIb);
 
-    Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
+    Assertions.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
     // restart DataNodes
     for (int i = 0; i < REPLICATION; i++) {
       cluster.restartDataNode(dnprops[i]);
@@ -182,13 +182,13 @@
     }
     Thread.sleep(2000);
     // make sure we have received block reports by checking the total block #
-    Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
-    Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
+    Assertions.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
+    Assertions.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
 
     cluster.restartNameNode(true);
     waitForNumPendingDeletionBlocks(0);
-    Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
-    Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
+    Assertions.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
+    Assertions.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
   }
 
   private long waitForReplication() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index ea7347f..cf09133 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -22,10 +22,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -96,8 +93,8 @@
       System.arraycopy(storages, 0, targets, 0, i);
       pendingReconstructions.increment(block, targets);
     }
-    assertEquals("Size of pendingReconstruction ",
-                 10, pendingReconstructions.size());
+    assertEquals(10, pendingReconstructions.size(),
+        "Size of pendingReconstruction ");
 
 
     //
@@ -105,15 +102,15 @@
     //
     BlockInfo blk = genBlockInfo(8, 8, 0);
     pendingReconstructions.decrement(blk, storages[7]); // removes one replica
-    assertEquals("pendingReconstructions.getNumReplicas ",
-                 7, pendingReconstructions.getNumReplicas(blk));
+    assertEquals(7, pendingReconstructions.getNumReplicas(blk),
+        "pendingReconstructions.getNumReplicas ");
 
     //
     // insert the same item twice should be counted as once
     //
     pendingReconstructions.increment(blk, storages[0]);
-    assertEquals("pendingReconstructions.getNumReplicas ",
-        7, pendingReconstructions.getNumReplicas(blk));
+    assertEquals(7, pendingReconstructions.getNumReplicas(blk),
+        "pendingReconstructions.getNumReplicas ");
 
     for (int i = 0; i < 7; i++) {
       // removes all replicas
@@ -170,10 +167,10 @@
     System.out.println("Had to wait for " + loop +
                        " seconds for the lot to timeout");
 
-    //
-    // Verify that everything has timed out.
-    //
-    assertEquals("Size of pendingReconstructions ", 0, pendingReconstructions.size());
+    //
+    // Verify that everything has timed out.
+    //
+    assertEquals(0, pendingReconstructions.size(), "Size of pendingReconstructions ");
     assertEquals(15L, pendingReconstructions.getNumTimedOuts());
     Block[] timedOut = pendingReconstructions.getTimedOutBlocks();
     assertNotNull(timedOut);
@@ -227,8 +224,8 @@
       //Save it for later.
       BlockInfo storedBlock = blockInfo;
 
-      assertEquals("Size of pendingReconstructions ", 1,
-          pendingReconstruction.size());
+      assertEquals(1, pendingReconstruction.size(),
+          "Size of pendingReconstructions ");
 
       // Add a second block to pendingReconstructions that has no
       // corresponding entry in blocksmap
@@ -237,9 +234,9 @@
       pendingReconstruction.increment(blockInfo,
           DFSTestUtil.createDatanodeStorageInfos(1));
 
-      // verify 2 blocks in pendingReconstructions
-      assertEquals("Size of pendingReconstructions ", 2,
-          pendingReconstruction.size());
+      // verify 2 blocks in pendingReconstructions
+      assertEquals(2, pendingReconstruction.size(),
+          "Size of pendingReconstructions ");
 
       //
       // Wait for everything to timeout.
@@ -264,13 +261,13 @@
       // Verify that the generation stamp we will try to replicate
       // is now 1
       for (Block b: neededReconstruction) {
-        assertEquals("Generation stamp is 1 ", 1,
-            b.getGenerationStamp());
+        assertEquals(1, b.getGenerationStamp(),
+            "Generation stamp is 1 ");
       }
 
-      // Verify size of neededReconstruction is exactly 1.
-      assertEquals("size of neededReconstruction is 1 ", 1,
-          neededReconstruction.size());
+      // Verify size of neededReconstruction is exactly 1.
+      assertEquals(1, neededReconstruction.size(),
+          "size of neededReconstruction is 1 ");
 
       // Verify HDFS-11960
       // Stop the replication/redundancy monitor
@@ -283,8 +280,8 @@
       // Add a stored block to the pendingReconstruction.
       pendingReconstruction.increment(blockInfo,
           DFSTestUtil.createDatanodeStorageInfos(1));
-      assertEquals("Size of pendingReconstructions ", 1,
-          pendingReconstruction.size());
+      assertEquals(1, pendingReconstruction.size(),
+          "Size of pendingReconstructions ");
 
       // A received IBR processing calls addBlock(). If the gen stamp in the
       // report is not the same, it should stay in pending.
@@ -297,9 +294,9 @@
         fsn.writeUnlock();
       }
 
-      // The block should still be pending
-      assertEquals("Size of pendingReconstructions ", 1,
-          pendingReconstruction.size());
+      // The block should still be pending
+      assertEquals(1, pendingReconstruction.size(),
+          "Size of pendingReconstructions ");
 
       // A block report with the correct gen stamp should remove the record
       // from the pending queue.
@@ -313,9 +310,9 @@
 
       GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 500,
           10000);
-      // The pending queue should be empty.
-      assertEquals("Size of pendingReconstructions ", 0,
-          pendingReconstruction.size());
+      // The pending queue should be empty.
+      assertEquals(0, pendingReconstruction.size(),
+          "Size of pendingReconstructions ");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingRecoveryBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingRecoveryBlocks.java
index baad89f..bef0c63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingRecoveryBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingRecoveryBlocks.java
@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * This class contains unit tests for PendingRecoveryBlocks.java functionality.
@@ -37,7 +37,7 @@
   private final BlockInfo blk2 = getBlock(2);
   private final BlockInfo blk3 = getBlock(3);
 
-  @Before
+  @BeforeEach
   public void setUp() {
     pendingRecoveryBlocks =
         Mockito.spy(new PendingRecoveryBlocks(recoveryTimeout));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
index c7f8379..50ae28c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
@@ -26,12 +26,13 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.util.RwLock;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
 import java.io.IOException;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -46,7 +47,7 @@
   private String providedStorageID;
   private String blockPoolID;
 
-  @Before
+  @BeforeEach
   public void setup() {
     providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT;
     conf = new HdfsConfiguration();
@@ -92,15 +93,15 @@
         providedMap.getStorage(dn1, dn1ProvidedStorage);
     DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1, dn1DiskStorage);
 
-    assertTrue("The provided storages should be equal",
-        dns1Provided == providedMapStorage);
-    assertTrue("Disk storage has not yet been registered with block manager",
-        dns1Disk == null);
+    assertTrue(dns1Provided == providedMapStorage,
+        "The provided storages should be equal");
+    assertTrue(dns1Disk == null,
+        "Disk storage has not yet been registered with block manager");
     // add the disk storage to the datanode.
     DatanodeStorageInfo dnsDisk = new DatanodeStorageInfo(dn1, dn1DiskStorage);
     dn1.injectStorage(dnsDisk);
-    assertTrue("Disk storage must match the injected storage info",
-        dnsDisk == providedMap.getStorage(dn1, dn1DiskStorage));
+    assertTrue(dnsDisk == providedMap.getStorage(dn1, dn1DiskStorage),
+        "Disk storage must match the injected storage info");
 
     // create a 2nd datanode
     DatanodeDescriptor dn2 = createDatanodeDescriptor(5010);
@@ -112,9 +113,9 @@
 
     DatanodeStorageInfo dns2Provided = providedMap.getStorage(
         dn2, dn2ProvidedStorage);
-    assertTrue("The provided storages should be equal",
-        dns2Provided == providedMapStorage);
-    assertTrue("The DatanodeDescriptor should contain the provided storage",
-        dn2.getStorageInfo(providedStorageID) == providedMapStorage);
+    assertTrue(dns2Provided == providedMapStorage,
+        "The provided storages should be equal");
+    assertTrue(dn2.getStorageInfo(providedStorageID) == providedMapStorage,
+        "The DatanodeDescriptor should contain the provided storage");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
index f4dd3f7..a710822 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -112,8 +112,8 @@
         }
         Thread.sleep(100);
       }
-      assertEquals("There should be less than 2 replicas in the "
-          + "liveReplicasMap", 1, liveReplicas);
+      assertEquals(1, liveReplicas,
+          "There should be less than 2 replicas in the " + "liveReplicasMap");
       
       while (true) {
         if ((liveReplicas =
@@ -124,7 +124,7 @@
         }
         Thread.sleep(100);
       }
-      assertEquals("There should be two live replicas", 2, liveReplicas);
+      assertEquals(2, liveReplicas, "There should be two live replicas");
 
       while (true) {
         Thread.sleep(100);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 19b8450..51280cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -35,10 +35,10 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -95,7 +95,7 @@
   private static final HdfsConfiguration conf = new HdfsConfiguration();
   private DistributedFileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
@@ -103,7 +103,7 @@
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -178,8 +178,8 @@
       DatanodeStorageInfo storage = it.next();
       rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
     }
-    Assert.assertEquals("rackSet size is wrong: " + rackSet, dataBlocks - 1,
-        rackSet.size());
+    Assertions.assertEquals(dataBlocks - 1, rackSet.size(),
+        "rackSet size is wrong: " + rackSet);
 
     // restart the stopped datanode
     cluster.restartDataNode(lastHost);
@@ -188,8 +188,8 @@
     // make sure we have 6 racks again
     NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
     LOG.info("topology is: {}", topology);
-    Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
-    Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
+    Assertions.assertEquals(hosts.length, topology.getNumOfLeaves());
+    Assertions.assertEquals(dataBlocks, topology.getNumOfRacks());
 
     // pause all the heartbeats
     for (DataNode dn : cluster.getDataNodes()) {
@@ -211,8 +211,8 @@
         DatanodeStorageInfo storage = it.next();
         if (storage != null) {
           DatanodeDescriptor dn = storage.getDatanodeDescriptor();
-          Assert.assertEquals("Block to be erasure coded is wrong for datanode:"
-              + dn, 0, dn.getNumberOfBlocksToBeErasureCoded());
+          Assertions.assertEquals(0, dn.getNumberOfBlocksToBeErasureCoded(),
+              "Block to be erasure coded is wrong for datanode:" + dn);
           if (dn.getNumberOfBlocksToBeReplicated() == 1) {
             scheduled = true;
           }
@@ -223,7 +223,7 @@
       }
       Thread.sleep(1000);
     }
-    Assert.assertTrue(scheduled);
+    Assertions.assertTrue(scheduled);
   }
 
   @Test
@@ -270,7 +270,7 @@
     LocatedBlocks blks = fs.getClient().getLocatedBlocks(file.toString(), 0);
     LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
     for (DatanodeInfo dn : block.getLocations()) {
-      Assert.assertFalse(dn.getHostName().equals("host1"));
+      Assertions.assertFalse(dn.getHostName().equals("host1"));
     }
   }
 
@@ -321,11 +321,11 @@
       recovered = bm.countNodes(blockInfo).liveReplicas() >=
           dataBlocks + parityBlocks;
     }
-    Assert.assertTrue(recovered);
+    Assertions.assertTrue(recovered);
 
     // mark h9 as decommissioning
     DataNode datanode9 = getDataNode(hostNames[hostNames.length - 3]);
-    Assert.assertNotNull(datanode9);
+    Assertions.assertNotNull(datanode9);
     final DatanodeDescriptor dn9 = dm.getDatanode(datanode9.getDatanodeId());
     dn9.startDecommission();
 
@@ -338,7 +338,7 @@
 
     // start decommissioning h9
     boolean satisfied = bm.isPlacementPolicySatisfied(blockInfo);
-    Assert.assertFalse(satisfied);
+    Assertions.assertFalse(satisfied);
     final DatanodeAdminManager decomManager =
         (DatanodeAdminManager) Whitebox.getInternalState(
             dm, "datanodeAdminManager");
@@ -356,7 +356,7 @@
       Thread.sleep(1000);
       decommissioned = dn9.isDecommissioned();
     }
-    Assert.assertTrue(decommissioned);
-    Assert.assertTrue(bm.isPlacementPolicySatisfied(blockInfo));
+    Assertions.assertTrue(decommissioned);
+    Assertions.assertTrue(bm.isPlacementPolicySatisfied(blockInfo));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java
index 0667e26..3ac6478 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRedundancyMonitor.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
 import java.util.Set;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 57f61b4..0a1ae15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -18,12 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYSTORAGETYPE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -872,10 +867,10 @@
       // Lets wait for the replication interval
       Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
 
-      // Check replication completed successfully. Need not wait till it process
-      // all the 100 normal blocks.
-      assertFalse("Not able to clear the element from high priority list",
-          neededReconstruction.iterator(HIGH_PRIORITY).hasNext());
+      // Check replication completed successfully. Need not wait till it
+      // processes all the 100 normal blocks.
+      assertFalse(neededReconstruction.iterator(HIGH_PRIORITY).hasNext(),
+          "Not able to clear the element from high priority list");
     } finally {
       cluster.shutdown();
     }
@@ -945,11 +940,11 @@
       List<List<BlockInfo>> chosenBlocks, int... expectedSizes) {
     int i = 0;
     for(; i < chosenBlocks.size(); i++) {
-      assertEquals("Not returned the expected number for i=" + i,
-          expectedSizes[i], chosenBlocks.get(i).size());
+      assertEquals(expectedSizes[i], chosenBlocks.get(i).size(),
+          "Not returned the expected number for i=" + i);
     }
     for(; i < expectedSizes.length; i++) {
-      assertEquals("Expected size is non-zero for i=" + i, 0, expectedSizes[i]);
+      assertEquals(0, expectedSizes[i], "Expected size is non-zero for i=" + i);
     }
   }
   
@@ -1525,8 +1520,8 @@
     targets = chooseTarget(5, dataNodes[2], null, favouredNodes);
     assertEquals(targets.length, 5);
     for (int i = 0; i < targets.length; i++) {
-      assertTrue("Target should be a part of Expected Targets",
-          expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+      assertTrue(expectedTargets.contains(targets[i].getDatanodeDescriptor()),
+          "Target should be a part of Expected Targets");
     }
   }
 
@@ -1545,8 +1540,8 @@
           favouredNodes);
       assertEquals(targets.length, 2);
       for (int i = 0; i < targets.length; i++) {
-        assertTrue("Target should be a part of Expected Targets",
-            expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+        assertTrue(expectedTargets.contains(targets[i].getDatanodeDescriptor()),
+            "Target should be a part of Expected Targets");
       }
     } finally {
       ((BlockPlacementPolicyDefault) replicator).setPreferLocalNode(true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index fef0b45..e431a2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -30,8 +30,8 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -173,9 +173,9 @@
               new ArrayList<DatanodeStorageInfo>(), false, null,
               1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
       for(DatanodeStorageInfo info : targets) {
-        assertTrue("The node "+info.getDatanodeDescriptor().getName()+
-                " has higher load and should not have been picked!",
-            info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
+        assertTrue(info.getDatanodeDescriptor().getXceiverCount() <= (load / 6) * 1.2,
+            "The node " + info.getDatanodeDescriptor().getName()
+                + " has higher load and should not have been picked!");
       }
     } finally {
       namenode.getNamesystem().writeUnlock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
index f2c24a6..59006f30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -30,8 +30,8 @@
 import java.util.Arrays;
 import java.util.Set;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 @RunWith(Parameterized.class)
 public class TestReplicationPolicyExcludeSlowNodes
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 6572f78..ffcc2887 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -42,7 +40,7 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
 import org.apache.hadoop.net.Node;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTest {
@@ -895,10 +893,10 @@
     favouredNodes.add(dataNodes[3]);
     favouredNodes.add(dataNodes[0]);
     targets = chooseTarget(2, dataNodes[7], null, favouredNodes);
-    assertTrue("1st Replica is incorrect",
-      expectedTargets.contains(targets[0].getDatanodeDescriptor()));
-    assertTrue("2nd Replica is incorrect",
-      expectedTargets.contains(targets[1].getDatanodeDescriptor()));
+    assertTrue(expectedTargets.contains(targets[0].getDatanodeDescriptor()),
+        "1st Replica is incorrect");
+    assertTrue(expectedTargets.contains(targets[1].getDatanodeDescriptor()),
+        "2nd Replica is incorrect");
   }
 
   /**
@@ -928,8 +926,8 @@
     favouredNodes.add(dataNodes[2]);
     targets = chooseTarget(3, dataNodes[3], null, favouredNodes);
     for (int i = 0; i < targets.length; i++) {
-      assertTrue("Target should be a part of Expected Targets",
-          expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+      assertTrue(expectedTargets.contains(targets[i].getDatanodeDescriptor()),
+          "Target should be a part of Expected Targets");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithUpgradeDomain.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithUpgradeDomain.java
index 6f7d67a..dd076d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithUpgradeDomain.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithUpgradeDomain.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -40,7 +38,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.net.Node;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 public class TestReplicationPolicyWithUpgradeDomain
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
index 25b2a02..5602c1a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
@@ -21,7 +21,7 @@
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_BLOCKS_IN_GROUP;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
 
@@ -43,10 +43,10 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.stubbing.Answer;
 
 /**
@@ -76,7 +76,7 @@
   private SequentialBlockGroupIdGenerator blockGrpIdGenerator;
   private Path ecDir = new Path("/ecDir");
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
@@ -94,7 +94,7 @@
         StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -209,8 +209,8 @@
 
     List<LocatedBlock> contiguousBlocks = DFSTestUtil.getAllBlocks(fs, path1);
     assertThat(contiguousBlocks.size(), is(1));
-    Assert.assertEquals("Unexpected BlockId!", curBlockGroupIdValue,
-        contiguousBlocks.get(0).getBlock().getBlockId());
+    Assertions.assertEquals(curBlockGroupIdValue,
+        contiguousBlocks.get(0).getBlock().getBlockId(), "Unexpected BlockId!");
 
     // Reset back to the initial value to trigger collision
     blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
index 89fe8a4..92342a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
@@ -32,10 +32,10 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.mockito.Mockito.*;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
index 9fda398..81668fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowDiskTracker.java
@@ -19,10 +19,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -33,6 +30,7 @@
     .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys
     .DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -48,9 +46,9 @@
 import java.util.function.Supplier;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,7 +87,7 @@
     conf.setTimeDuration(DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         OUTLIERS_REPORT_INTERVAL, TimeUnit.MILLISECONDS);
   }
-  @Before
+  @BeforeEach
   public void setup() {
     timer = new FakeTimer();
     tracker = new SlowDiskTracker(conf, timer);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowPeerTracker.java
index fb2928c..0239da2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowPeerTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowPeerTracker.java
@@ -26,9 +26,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.SlowPeerTracker
     .ReportForJson;
 import org.apache.hadoop.util.FakeTimer;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,10 +36,10 @@
 import java.io.IOException;
 import java.util.Set;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests for {@link SlowPeerTracker}.
@@ -61,7 +61,7 @@
   private static final ObjectReader READER =
       new ObjectMapper().readerFor(new TypeReference<Set<ReportForJson>>() {});
 
-  @Before
+  @BeforeEach
   public void setup() {
     conf = new HdfsConfiguration();
     timer = new FakeTimer();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java
index 45eedac..9dda574 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedBlock.java
@@ -34,8 +34,8 @@
 import java.util.ArrayList;
 import java.util.Date;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * This class tests the sorting of located blocks based on
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
index a017cb9..1cc449b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java
@@ -38,9 +38,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -63,7 +63,7 @@
   static DatanodeManager dm;
   static final long STALE_INTERVAL = 30 * 1000 * 60;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     dm = mockDatanodeManager();
   }
@@ -365,13 +365,13 @@
 
     for (LocatedBlock lb : lbs) {
       byte[] blockIndices = ((LocatedStripedBlock) lb).getBlockIndices();
-      // after sorting stale block index will be placed after normal nodes.
-      Assert.assertEquals("Failed to move stale node to bottom!", 1,
-          blockIndices[9]);
+      // after sorting stale block index will be placed after normal nodes.
+      Assertions.assertEquals(1, blockIndices[9],
+          "Failed to move stale node to bottom!");
       DatanodeInfo[] locations = lb.getLocations();
-      // After sorting stale node d13 will be placed after normal nodes
-      Assert.assertEquals("Failed to move stale dn after normal one!",
-          staleDns.remove(0), locations[9]);
+      // After sorting stale node d13 will be placed after normal nodes
+      Assertions.assertEquals(staleDns.remove(0), locations[9],
+          "Failed to move stale dn after normal one!");
     }
   }
 
@@ -392,17 +392,17 @@
         LOG.info("Block Locations size={}, locs={}, j=", nodes.length,
             dnInfo.toString(), j);
         if (j < blkGrpWidth) {
-          Assert.assertEquals("Node shouldn't be decommissioned",
-              AdminStates.NORMAL, dnInfo.getAdminState());
+          Assertions.assertEquals(AdminStates.NORMAL, dnInfo.getAdminState(),
+              "Node shouldn't be decommissioned");
         } else {
-          // check against decommissioned list
-          Assert.assertTrue(
-              "For block " + blk.getBlock() + " decommissioned node " + dnInfo
-                  + " is not last node in list: " + j + "th index of "
-                  + nodes.length,
-              decommissionedNodeList.contains(dnInfo.getXferAddr()));
-          Assert.assertEquals("Node should be decommissioned",
-              AdminStates.DECOMMISSIONED, dnInfo.getAdminState());
+          // check against decommissioned list
+          Assertions.assertTrue(
+              decommissionedNodeList.contains(dnInfo.getXferAddr()),
+              "For block " + blk.getBlock() + " decommissioned node " + dnInfo
+                  + " is not last node in list: " + j + "th index of "
+                  + nodes.length);
+          Assertions.assertEquals(AdminStates.DECOMMISSIONED,
+              dnInfo.getAdminState(), "Node should be decommissioned");
         }
       }
     }
@@ -552,10 +552,10 @@
           locToTokenList.get(i);
       DatanodeInfo[] di = lb.getLocations();
       for (int j = 0; j < di.length; j++) {
-        Assert.assertEquals("Block index value mismatches after sorting",
-            (byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
-        Assert.assertEquals("Block token value mismatches after sorting",
-            locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
+        Assertions.assertEquals((byte) locToIndex.get(di[j]),
+            stripedBlk.getBlockIndices()[j], "Block index value mismatches after sorting");
+        Assertions.assertEquals(locToToken.get(di[j]),
+            stripedBlk.getBlockTokens()[j], "Block token value mismatches after sorting");
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
index 0487c3f..658a033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -152,16 +152,16 @@
       DFSTestUtil.verifyClientStats(conf, cluster);
 
       bm.computeDatanodeWork();
-      assertTrue("The number of replication work pending before targets are " +
-              "determined should be non-negative.",
-          (Integer)Whitebox.getInternalState(secondDn,
-              "pendingReplicationWithoutTargets") >= 0);
+      assertTrue((Integer) Whitebox.getInternalState(secondDn,
+              "pendingReplicationWithoutTargets") >= 0,
+          "The number of replication work pending before targets are " +
+              "determined should be non-negative.");
 
       BlockManagerTestUtil.updateState(bm);
-      assertTrue("The number of blocks to be replicated should be less than "
-          + "or equal to " + bm.replicationStreamsHardLimit,
-          secondDn.getNumberOfBlocksToBeReplicated()
-          <= bm.replicationStreamsHardLimit);
+      assertTrue(secondDn.getNumberOfBlocksToBeReplicated()
+              <= bm.replicationStreamsHardLimit,
+          "The number of blocks to be replicated should be less than "
+              + "or equal to " + bm.replicationStreamsHardLimit);
       DFSTestUtil.verifyClientStats(conf, cluster);
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
index e033f18..9f78404 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
@@ -17,15 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 import java.io.IOException;
 import java.net.URI;
 
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.junit.Test;
 
 /**
  * This is a unit test, which tests {@link Util#stringAsURI(String)}
@@ -67,11 +67,11 @@
   public void testAbsolutePathAsURI() throws IOException {
     URI u = null;
     u = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
-    assertNotNull(
-        "Uri should not be null for Windows path" + ABSOLUTE_PATH_WINDOWS, u);
+    assertNotNull(u,
+        "Uri should not be null for Windows path" + ABSOLUTE_PATH_WINDOWS);
     assertEquals(URI_FILE_SCHEMA, u.getScheme());
     u = Util.stringAsURI(ABSOLUTE_PATH_UNIX);
-    assertNotNull("Uri should not be null for Unix path" + ABSOLUTE_PATH_UNIX, u);
+    assertNotNull(u, "Uri should not be null for Unix path" + ABSOLUTE_PATH_UNIX);
     assertEquals(URI_FILE_SCHEMA, u.getScheme());
   }
 
@@ -83,15 +83,15 @@
   public void testURI() throws IOException {
     LOG.info("Testing correct Unix URI: " + URI_UNIX);
     URI u = Util.stringAsURI(URI_UNIX);
-    LOG.info("Uri: " + u);    
-    assertNotNull("Uri should not be null at this point", u);
+    LOG.info("Uri: " + u);
+    assertNotNull(u, "Uri should not be null at this point");
     assertEquals(URI_FILE_SCHEMA, u.getScheme());
     assertEquals(URI_PATH_UNIX, u.getPath());
 
     LOG.info("Testing correct windows URI: " + URI_WINDOWS);
     u = Util.stringAsURI(URI_WINDOWS);
     LOG.info("Uri: " + u);
-    assertNotNull("Uri should not be null at this point", u);
+    assertNotNull(u, "Uri should not be null at this point");
     assertEquals(URI_FILE_SCHEMA, u.getScheme());
     assertEquals(URI_PATH_WINDOWS.replace("%20", " "), u.getPath());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java
index 34bc616..b11a60f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestHostRestrictingAuthorizationFilter.java
@@ -19,7 +19,7 @@
 
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
index e5746a0..2126da3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
@@ -39,9 +39,9 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import javax.servlet.ServletContext;
@@ -49,7 +49,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -59,7 +59,7 @@
   private final Configuration conf = new HdfsConfiguration();
 
   // allow user with TGT to run tests
-  @BeforeClass
+  @BeforeAll
   public static void setupKerb() {
     System.setProperty("java.security.krb5.kdc", "");
     System.setProperty("java.security.krb5.realm", "NONE");
@@ -142,7 +142,7 @@
     UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
     Token<? extends TokenIdentifier> tokenInUgi = ugi.getTokens().iterator()
         .next();
-    Assert.assertEquals(expected, tokenInUgi.getService().toString());
+    Assertions.assertEquals(expected, tokenInUgi.getService().toString());
   }
 
   @Test
@@ -168,9 +168,9 @@
     when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
         tokenString);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromToken(ugi);
 
     // token with auth-ed user
@@ -178,9 +178,9 @@
     when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
         tokenString);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);    
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);    
     checkUgiFromToken(ugi);
     
     // completely different user, token trumps auth
@@ -188,9 +188,9 @@
     when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
         tokenString);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);    
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);    
     checkUgiFromToken(ugi);
     
     // expected case
@@ -198,9 +198,9 @@
     when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
         tokenString);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);    
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);    
     checkUgiFromToken(ugi);
 
     // if present token, ignore doas parameter
@@ -209,9 +209,9 @@
         tokenString);
 
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromToken(ugi);
 
     // if present token, ignore user.name parameter
@@ -220,9 +220,9 @@
         tokenString);
 
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromToken(ugi);
 
     // if present token, ignore user.name and doas parameter
@@ -231,9 +231,9 @@
         tokenString);
 
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromToken(ugi);
 
   }
@@ -253,18 +253,18 @@
     request = getMockRequest(null, null, null);
     try {
       JspHelper.getUGI(context, request, conf);
-      Assert.fail("bad request allowed");
+      Assertions.fail("bad request allowed");
     } catch (IOException ioe) {
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "Security enabled but user not authenticated by filter",
           ioe.getMessage());
     }
     request = getMockRequest(null, realUser, null);
     try {
       JspHelper.getUGI(context, request, conf);
-      Assert.fail("bad request allowed");
+      Assertions.fail("bad request allowed");
     } catch (IOException ioe) {
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "Security enabled but user not authenticated by filter",
           ioe.getMessage());
     }
@@ -272,22 +272,22 @@
     // ugi for remote user
     request = getMockRequest(realUser, null, null);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getShortUserName(), realUser);
+    Assertions.assertNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getShortUserName(), realUser);
     checkUgiFromAuth(ugi);
     
     // ugi for remote user = real user
     request = getMockRequest(realUser, realUser, null);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getShortUserName(), realUser);
+    Assertions.assertNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getShortUserName(), realUser);
     checkUgiFromAuth(ugi);
     
     // if there is remote user via SPNEGO, ignore user.name param
     request = getMockRequest(realUser, user, null);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getShortUserName(), realUser);
+    Assertions.assertNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getShortUserName(), realUser);
     checkUgiFromAuth(ugi);
   }
   
@@ -312,18 +312,18 @@
     request = getMockRequest(null, null, user);
     try {
       JspHelper.getUGI(context, request, conf);
-      Assert.fail("bad request allowed");
+      Assertions.fail("bad request allowed");
     } catch (IOException ioe) {
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "Security enabled but user not authenticated by filter",
           ioe.getMessage());
     }
     request = getMockRequest(null, realUser, user);
     try {
       JspHelper.getUGI(context, request, conf);
-      Assert.fail("bad request allowed");
+      Assertions.fail("bad request allowed");
     } catch (IOException ioe) {
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "Security enabled but user not authenticated by filter",
           ioe.getMessage());
     }
@@ -331,25 +331,25 @@
     // proxy ugi for user via remote user
     request = getMockRequest(realUser, null, user);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromAuth(ugi);
     
     // proxy ugi for user vi a remote user = real user
     request = getMockRequest(realUser, realUser, user);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromAuth(ugi);
 
     // if there is remote user via SPNEGO, ignore user.name, doas param
     request = getMockRequest(realUser, user, user);
     ugi = JspHelper.getUGI(context, request, conf);
-    Assert.assertNotNull(ugi.getRealUser());
-    Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
-    Assert.assertEquals(ugi.getShortUserName(), user);
+    Assertions.assertNotNull(ugi.getRealUser());
+    Assertions.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
+    Assertions.assertEquals(ugi.getShortUserName(), user);
     checkUgiFromAuth(ugi);
 
 
@@ -358,18 +358,18 @@
     try {
       request = getMockRequest(user, null, realUser);
       JspHelper.getUGI(context, request, conf);
-      Assert.fail("bad proxy request allowed");
+      Assertions.fail("bad proxy request allowed");
     } catch (AuthorizationException ae) {
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "User: " + user + " is not allowed to impersonate " + realUser,
            ae.getMessage());
     }
     try {
       request = getMockRequest(user, user, realUser);
       JspHelper.getUGI(context, request, conf);
-      Assert.fail("bad proxy request allowed");
+      Assertions.fail("bad proxy request allowed");
     } catch (AuthorizationException ae) {
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "User: " + user + " is not allowed to impersonate " + realUser,
            ae.getMessage());
     }
@@ -420,24 +420,24 @@
   
   private void checkUgiFromAuth(UserGroupInformation ugi) {
     if (ugi.getRealUser() != null) {
-      Assert.assertEquals(AuthenticationMethod.PROXY,
+      Assertions.assertEquals(AuthenticationMethod.PROXY,
                           ugi.getAuthenticationMethod());
-      Assert.assertEquals(AuthenticationMethod.KERBEROS_SSL,
+      Assertions.assertEquals(AuthenticationMethod.KERBEROS_SSL,
                           ugi.getRealUser().getAuthenticationMethod());
     } else {
-      Assert.assertEquals(AuthenticationMethod.KERBEROS_SSL,
+      Assertions.assertEquals(AuthenticationMethod.KERBEROS_SSL,
                           ugi.getAuthenticationMethod()); 
     }
   }
   
   private void checkUgiFromToken(UserGroupInformation ugi) {
     if (ugi.getRealUser() != null) {
-      Assert.assertEquals(AuthenticationMethod.PROXY,
+      Assertions.assertEquals(AuthenticationMethod.PROXY,
                           ugi.getAuthenticationMethod());
-      Assert.assertEquals(AuthenticationMethod.TOKEN,
+      Assertions.assertEquals(AuthenticationMethod.TOKEN,
                           ugi.getRealUser().getAuthenticationMethod());
     } else {
-      Assert.assertEquals(AuthenticationMethod.TOKEN,
+      Assertions.assertEquals(AuthenticationMethod.TOKEN,
                           ugi.getAuthenticationMethod());
     }
   }
@@ -453,7 +453,7 @@
         in.reset(out.getData(), out.getLength());
         HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState
             .read(in);
-        assertTrue("testReadWrite error !!!", repState == result);
+        assertTrue(repState == result, "testReadWrite error !!!");
         out.reset();
         in.reset();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
index 4d03df1..7e66e03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
@@ -29,18 +29,13 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -65,10 +60,7 @@
   private Configuration conf;
   private final static String BPID = "BPID-0";
 
-  @Rule
-  public final ExpectedException exception = ExpectedException.none();
-
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     conf = new Configuration();
     int port = 9876;
@@ -87,7 +79,7 @@
     inMemoryLevelDBAliasMapClient = new InMemoryLevelDBAliasMapClient();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     levelDBAliasMapServer.close();
     inMemoryLevelDBAliasMapClient.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDBFileRegionAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDBFileRegionAliasMap.java
index a3c13e9..baffbca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDBFileRegionAliasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDBFileRegionAliasMap.java
@@ -21,14 +21,14 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.nio.file.Files;
 import java.util.Iterator;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
 /**
  * Tests for the {@link LevelDBFileRegionAliasMap}.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java
index 84b0d23..f1bdeff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestLevelDbMockAliasMapClient.java
@@ -27,9 +27,10 @@
 import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.iq80.leveldb.DBException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
@@ -50,7 +51,7 @@
   private InMemoryAliasMap aliasMapMock;
   private final String bpid = "BPID-0";
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     aliasMapMock = mock(InMemoryAliasMap.class);
     when(aliasMapMock.getBlockPoolId()).thenReturn(bpid);
@@ -72,7 +73,7 @@
     inMemoryLevelDBAliasMapClient.setConf(conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     levelDBAliasMapServer.close();
     inMemoryLevelDBAliasMapClient.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestTextBlockAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestTextBlockAliasMap.java
index 29c53e7..92950b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestTextBlockAliasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestTextBlockAliasMap.java
@@ -32,10 +32,10 @@
 import org.apache.hadoop.io.compress.CompressionCodec;
 
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.fileNameFromBlockPoolID;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test for the text based block format for provided block maps.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
index 1a65d2f..d1bca83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
@@ -18,9 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 
@@ -39,6 +38,8 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeoutException;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -69,10 +70,10 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.slf4j.event.Level;
@@ -112,14 +113,14 @@
     resetConfiguration();
   }
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     REPL_FACTOR = 1; //Reset if case a test has modified the value
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (fs != null) {
       fs.close();
@@ -255,8 +256,8 @@
 
     for (int i = 0; i < blocksAfterReport.size(); i++) {
       ExtendedBlock b = blocksAfterReport.get(i).getBlock();
-      assertEquals("Length of " + i + "th block is incorrect",
-        oldLengths[i], b.getNumBytes());
+      assertEquals(oldLengths[i], b.getNumBytes(),
+          "Length of " + i + "th block is incorrect");
     }
   }
 
@@ -332,10 +333,10 @@
 
     printStats();
 
-    assertEquals("Wrong number of MissingBlocks is found",
-      blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount());
-    assertEquals("Wrong number of UnderReplicatedBlocks is found",
-      blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks());
+    assertEquals(blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount(),
+        "Wrong number of MissingBlocks is found");
+    assertEquals(blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks(),
+        "Wrong number of UnderReplicatedBlocks is found");
   }
 
 
@@ -426,8 +427,8 @@
     StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
     sendBlockReports(dnR, poolId, reports);
     printStats();
-    assertEquals("Wrong number of PendingReplication Blocks",
-      0, cluster.getNamesystem().getUnderReplicatedBlocks());
+    assertEquals(0, cluster.getNamesystem().getUnderReplicatedBlocks(),
+        "Wrong number of PendingReplication Blocks");
   }
 
   /**
@@ -524,8 +525,8 @@
       StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
       sendBlockReports(dnR, poolId, reports);
       printStats();
-      assertEquals("Wrong number of PendingReplication blocks",
-        blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
+      assertEquals(blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks(),
+          "Wrong number of PendingReplication blocks");
 
       try {
         bc.join();
@@ -567,8 +568,8 @@
       StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
       sendBlockReports(dnR, poolId, reports);
       printStats();
-      assertEquals("Wrong number of PendingReplication blocks",
-        2, cluster.getNamesystem().getPendingReplicationBlocks());
+      assertEquals(2, cluster.getNamesystem().getPendingReplicationBlocks(),
+          "Wrong number of PendingReplication blocks");
 
       try {
         bc.join();
@@ -696,7 +697,7 @@
     executorService.shutdown();
 
     // Verify that the storages match before and after the test
-    Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
+    Assertions.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
   }
 
   private void waitForTempReplica(Block bl, int DN_N1) throws IOException {
@@ -729,8 +730,8 @@
           LOG.debug("Has been waiting for " + waiting_period + " ms.");
         }
       if (waiting_period > TIMEOUT)
-        assertTrue("Was waiting too long to get ReplicaInfo from a datanode",
-          tooLongWait);
+        assertTrue(tooLongWait,
+            "Was waiting too long to get ReplicaInfo from a datanode");
     }
 
     HdfsServerConstants.ReplicaState state = r.getState();
@@ -746,8 +747,8 @@
             " is in state " + state.getValue());
       }
       if (Time.monotonicNow() - start > TIMEOUT)
-        assertTrue("Was waiting too long for a replica to become TEMPORARY",
-          tooLongWait);
+        assertTrue(tooLongWait,
+            "Was waiting too long for a replica to become TEMPORARY");
     }
     if(LOG.isDebugEnabled()) {
       LOG.debug("Replica state after the loop " + state.getValue());
@@ -910,7 +911,7 @@
         startDNandWait(filePath, true);
       } catch (Exception e) {
         e.printStackTrace();
-        Assert.fail("Failed to start BlockChecker: " + e);
+        Assertions.fail("Failed to start BlockChecker: " + e);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index dba5a14..e2fa0a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -39,8 +39,8 @@
 
 import java.util.function.Supplier;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 
 /**
  * DO NOT ADD MOCKITO IMPORTS HERE Or Downstream projects may not
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
index 30fee2f..2b2d214 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -172,7 +172,7 @@
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(nnSocketAddr, nnAddr);
+        Assertions.assertEquals(nnSocketAddr, nnAddr);
         return namenode;
       }
     };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index fc2a998..c8c48d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -29,12 +29,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -86,8 +81,8 @@
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -128,7 +123,7 @@
   private DataNode mockDn;
   private FsDatasetSpi<?> mockFSDataset;
   
-  @Before
+  @BeforeEach
   public void setupMocks() throws Exception {
     mockNN1 = setupNNMock(0);
     mockNN2 = setupNNMock(1);
@@ -793,9 +788,9 @@
           .getStorageType());
       Thread.sleep(10000);
       long difference = secondCallTime - firstCallTime;
-      assertTrue("Active namenode reportBadBlock processing should be "
-          + "independent of standby namenode reportBadBlock processing ",
-          difference < 5000);
+      assertTrue(difference < 5000,
+          "Active namenode reportBadBlock processing should be "
+              + "independent of standby namenode reportBadBlock processing ");
     } finally {
       bpos.stop();
       bpos.join();
@@ -833,9 +828,9 @@
       bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
       Thread.sleep(10000);
       long difference = secondCallTime - firstCallTime;
-      assertTrue("Active namenode trySendErrorReport processing "
-          + "should be independent of standby namenode trySendErrorReport"
-          + " processing ", difference < 5000);
+      assertTrue(difference < 5000,
+          "Active namenode trySendErrorReport processing should be independent"
+              + " of standby namenode trySendErrorReport processing ");
     } finally {
       bpos.stop();
       bpos.join();
@@ -873,8 +868,8 @@
       String errorString = "Can't send invalid block " + FAKE_BLOCK;
       bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
       GenericTestUtils.waitFor(() -> secondCallTime != 0, 100, 20000);
-      assertTrue("Active namenode didn't add the report back to the queue "
-          + "when errorReport threw IOException", secondCallTime != 0);
+      assertTrue(secondCallTime != 0, "Active namenode didn't add the report back to the queue "
+          + "when errorReport threw IOException");
     } finally {
       bpos.stop();
       bpos.join();
@@ -987,9 +982,9 @@
       // Send register command back to Datanode to reRegister().
       // After reRegister IBRs should be cleared.
       datanodeCommands[1] = new DatanodeCommand[] { new RegisterCommand() };
-      assertEquals(
-          "IBR size before reRegister should be non-0", 1, getStandbyIBRSize(
-              bpos));
+      assertEquals(
+          1, getStandbyIBRSize(bpos),
+          "IBR size before reRegister should be non-0");
       bpos.triggerHeartbeatForTests();
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override
@@ -1197,8 +1192,8 @@
       DFSTestUtil.createFile(fs, file, 10240L, (short)1, 0L);
 
       MetricsRecordBuilder mrb = getMetrics(datanode.getMetrics().name());
-      assertTrue("Process command nums is not expected.",
-          getLongCounter("NumProcessedCommands", mrb) > 0);
+      assertTrue(getLongCounter("NumProcessedCommands", mrb) > 0,
+          "Process command nums is not expected.");
       assertEquals(0, getLongCounter("SumOfActorCommandQueueLength", mrb));
       // Check new metric result about processedCommandsOp.
       // One command send back to DataNode here is #FinalizeCommand.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java
index f440750..3365f93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java
@@ -45,8 +45,8 @@
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.Time;
 import org.slf4j.event.Level;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test verifies that incremental block reports are sent in batch mode
@@ -161,7 +161,7 @@
         });
       }
       for(int i = 0; i < NUM_FILES; i++) {
-        Assert.assertTrue(verifyService.take().get());
+        Assertions.assertTrue(verifyService.take().get());
       }
       final long testEndTime = Time.monotonicNow();
 
@@ -247,7 +247,7 @@
       for(int i = 0; i < numBlocks; i++) {
         in.read(computed);
         nextBytes(i, seed, expected);
-        Assert.assertArrayEquals(expected, computed);
+        Assertions.assertArrayEquals(expected, computed);
       }
       return true;
     } catch(Exception e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockCountersInPendingIBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockCountersInPendingIBR.java
index 6f5e620..ececeb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockCountersInPendingIBR.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockCountersInPendingIBR.java
@@ -36,7 +36,7 @@
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.MetricsAsserts;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
index 958b3e4..55d1565 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
@@ -40,13 +40,13 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 
 /**
  * This test verifies NameNode behavior when it gets unexpected block reports
@@ -68,7 +68,7 @@
   private DFSClient client;
   private String bpid;
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -79,7 +79,7 @@
     bpid = cluster.getNamesystem().getBlockPoolId();
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (cluster != null) {
       fs.close();
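
The lifecycle changes above repeat throughout the module: @Before/@After become @BeforeEach/@AfterEach, and assertThat is now imported from org.hamcrest.MatcherAssert because JUnit 5 no longer bundles it. A short sketch of that combination, using an invented resource rather than a MiniDFSCluster:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.core.Is.is;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    // Illustration only; the resource here stands in for cluster/fs setup.
    class LifecycleMigrationSketch {
      private StringBuilder resource;

      @BeforeEach   // was @Before under JUnit 4
      void startUpCluster() {
        resource = new StringBuilder("ready");
      }

      @AfterEach    // was @After under JUnit 4
      void shutDownCluster() {
        resource = null;
      }

      @Test
      void resourceIsFreshForEachTest() {
        // Hamcrest's own assertThat replaces the removed org.junit.Assert.assertThat.
        assertThat(resource.toString(), is("ready"));
      }
    }
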
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
index a4f5071..0b2d3f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -32,8 +32,9 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -47,7 +48,7 @@
   private final StringBuilder log = new StringBuilder();
   private int mockIdx = 1;
   
-  @Before
+  @BeforeEach
   public void setupBPM() {
     bpm = new BlockPoolManager(mockDN){
 
@@ -159,9 +160,9 @@
     bpm.refreshNamenodes(conf);
     assertEquals("create #1\n", log.toString());
     Map<String, BPOfferService> map = bpm.getBpByNameserviceId();
-    Assert.assertFalse(map.containsKey("ns2"));
-    Assert.assertFalse(map.containsKey("ns3"));
-    Assert.assertTrue(map.containsKey("ns1"));
+    Assertions.assertFalse(map.containsKey("ns2"));
+    Assertions.assertFalse(map.containsKey("ns3"));
+    Assertions.assertTrue(map.containsKey("ns1"));
     log.setLength(0);
   }
 
@@ -179,18 +180,18 @@
             "create #2\n" +
             "create #3\n", log.toString());
     Map<String, BPOfferService> map = bpm.getBpByNameserviceId();
-    Assert.assertTrue(map.containsKey("ns1"));
-    Assert.assertTrue(map.containsKey("ns2"));
-    Assert.assertTrue(map.containsKey("ns3"));
-    Assert.assertEquals(2, map.get("ns3").getBPServiceActors().size());
+    Assertions.assertTrue(map.containsKey("ns1"));
+    Assertions.assertTrue(map.containsKey("ns2"));
+    Assertions.assertTrue(map.containsKey("ns3"));
+    Assertions.assertEquals(2, map.get("ns3").getBPServiceActors().size());
     Assert.assertEquals("ns3-" +  MockDomainNameResolver.FQDN_1 + "-8020",
         map.get("ns3").getBPServiceActors().get(0).getNnId());
     Assert.assertEquals("ns3-" +  MockDomainNameResolver.FQDN_2 + "-8020",
         map.get("ns3").getBPServiceActors().get(1).getNnId());
-    Assert.assertEquals(
+    Assertions.assertEquals(
         new InetSocketAddress(MockDomainNameResolver.FQDN_1, 8020),
         map.get("ns3").getBPServiceActors().get(0).getNNSocketAddress());
-    Assert.assertEquals(
+    Assertions.assertEquals(
         new InetSocketAddress(MockDomainNameResolver.FQDN_2, 8020),
         map.get("ns3").getBPServiceActors().get(1).getNNSocketAddress());
     log.setLength(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
index 5e850b9..04cee8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
@@ -27,13 +27,13 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 
 /**
  * Test that BlockPoolSliceStorage can correctly generate trash and
  * restore directories for a given block file path.
-*/
+ */
 public class TestBlockPoolSliceStorage {
 
   public static final Logger LOG = LoggerFactory
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 995a135..6299e9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
@@ -93,11 +93,11 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -170,7 +170,7 @@
    * Starts an instance of DataNode
    * @throws IOException
    */
-  @Before
+  @BeforeEach
   public void startUp() throws IOException, URISyntaxException {
     tearDownDone = false;
     conf = new HdfsConfiguration();
@@ -228,7 +228,7 @@
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(NN_ADDR, nnAddr);
+        Assertions.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };
@@ -259,15 +259,15 @@
     } catch (InterruptedException e) {
       LOG.warn("InterruptedException while waiting to see active NN", e);
     }
-    Assert.assertNotNull("Failed to get ActiveNN",
-        dn.getAllBpOs().get(0).getActiveNN());
+    Assertions.assertNotNull(
+        dn.getAllBpOs().get(0).getActiveNN(), "Failed to get ActiveNN");
   }
 
   /**
    * Cleans the resources and closes the instance of datanode
    * @throws IOException if an error occurred
    */
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (!tearDownDone && dn != null) {
       try {
@@ -277,8 +277,8 @@
       } finally {
         File dir = new File(DATA_DIR);
         if (dir.exists())
-          Assert.assertTrue(
-              "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
+          Assertions.assertTrue(FileUtil.fullyDelete(dir),
+              "Cannot delete data-node dirs");
       }
       tearDownDone = true;
     }
@@ -344,9 +344,9 @@
 
     try {
       testSyncReplicas(replica1, replica2, dn1, dn2);
-      Assert.fail("Two finalized replicas should not have different lengthes!");
+      Assertions.fail("Two finalized replicas should not have different lengths!");
     } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           "Inconsistent size of finalized replicas. "));
     }
   }
@@ -724,9 +724,9 @@
     } catch (IOException e) {
       // expect IOException to be thrown here
       e.printStackTrace();
-      assertTrue("Wrong exception was thrown: " + e.getMessage(),
-          e.getMessage().contains("Found 1 replica(s) for block " + block +
-          " but none is in RWR or better state"));
+      assertTrue(
+          e.getMessage().contains("Found 1 replica(s) for block " + block +
+              " but none is in RWR or better state"), "Wrong exception was thrown: " + e.getMessage());
       exceptionThrown = true;
     } finally {
       assertTrue(exceptionThrown);
@@ -752,8 +752,8 @@
             blockLengths[id], 0, null);
         syncList.put((long) id, new BlockRecord(null, null, rInfo));
       }
-      Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + i + "]", safeLength,
-          recoveryTask.getSafeLength(syncList));
+      Assertions.assertEquals(safeLength,
+          recoveryTask.getSafeLength(syncList), "BLOCK_LENGTHS_SUITE[" + i + "]");
     }
   }
 
@@ -885,7 +885,7 @@
     // We need a long value for the data xceiver stop timeout.
     // Otherwise the timeout will trigger, and we will not have tested that
     // thread join was done locklessly.
-    Assert.assertEquals(
+    Assertions.assertEquals(
         TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS,
         dn.getDnConf().getXceiverStopTimeout());
     final TestStopWorkerSemaphore progressParent =
@@ -966,7 +966,7 @@
     // unit test framework, so we have to do it manually here.
     String failureReason = failure.get();
     if (failureReason != null) {
-      Assert.fail("Thread failure: " + failureReason);
+      Assertions.fail("Thread failure: " + failureReason);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java
index 8d2df18..944a18c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java
@@ -47,11 +47,11 @@
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.AutoCloseableLock;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -76,7 +76,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -117,7 +117,7 @@
    * Starts an instance of DataNode.
    * @throws IOException
    */
-  @Before
+  @BeforeEach
   public void startUp() throws IOException {
     tearDownDone = false;
     conf = new HdfsConfiguration();
@@ -167,7 +167,7 @@
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(NN_ADDR, nnAddr);
+        Assertions.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };
@@ -191,15 +191,15 @@
     } catch (InterruptedException e) {
       LOG.warn("InterruptedException while waiting to see active NN", e);
     }
-    Assert.assertNotNull("Failed to get ActiveNN",
-        dn.getAllBpOs().get(0).getActiveNN());
+    Assertions.assertNotNull(
+        dn.getAllBpOs().get(0).getActiveNN(), "Failed to get ActiveNN");
   }
 
   /**
    * Cleans the resources and closes the instance of datanode.
    * @throws IOException if an error occurred
    */
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (!tearDownDone && dn != null) {
       try {
@@ -209,8 +209,8 @@
       } finally {
         File dir = new File(DATA_DIR);
         if (dir.exists()) {
-          Assert.assertTrue(
-              "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
+          Assertions.assertTrue(FileUtil.fullyDelete(dir),
+              "Cannot delete data-node dirs");
         }
       }
       tearDownDone = true;
@@ -265,13 +265,13 @@
       try {
         out.close();
       } catch (IOException e) {
-        Assert.assertTrue("Writing should fail",
-            e.getMessage().contains("are bad. Aborting..."));
+        Assertions.assertTrue(
+            e.getMessage().contains("are bad. Aborting..."), "Writing should fail");
       } finally {
         recoveryThread.join();
       }
-      Assert.assertTrue("Recovery should be initiated successfully",
-          recoveryInitResult.get());
+      Assertions.assertTrue(
+          recoveryInitResult.get(), "Recovery should be initiated successfully");
 
       dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock()
               .getGenerationStamp() + 1, block.getBlock().getBlockId(),
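
Where the original assertion carried a failure message, as in the two TestBlockRecovery files above, the conversion also has to move that message: org.junit.Assert took it as the first argument, while org.junit.jupiter.api.Assertions takes it as the last. A hedged sketch of the before/after shape with invented values:

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Test;

    // Illustration only; not part of the patch.
    class MessageOrderSketch {
      @Test
      void failureMessageMovesToTheLastParameter() {
        String activeNn = "nn-01";
        // JUnit 4: Assert.assertNotNull("Failed to get ActiveNN", activeNn);
        Assertions.assertNotNull(activeNn, "Failed to get ActiveNN");
        // JUnit 4: Assert.assertEquals("unexpected name length", 5, activeNn.length());
        Assertions.assertEquals(5, activeNn.length(), "unexpected name length");
      }
    }
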
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index 3f99f1b..064af70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -229,7 +226,7 @@
 
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(fileName, 0).get(0);
       DatanodeInfo[] oldNodes = lb.getLocations();
-      assertEquals("Wrong block locations", oldNodes.length, 1);
+      assertEquals(oldNodes.length, 1, "Wrong block locations");
       DatanodeInfo source = oldNodes[0];
       ExtendedBlock b = lb.getBlock();
 
@@ -243,10 +240,10 @@
         }
       }
 
-      assertNotNull("Failed to choose destination datanode!", destin);
+      assertNotNull(destin, "Failed to choose destination datanode!");
 
-      assertFalse("Source and destin datanode should be different",
-          source.equals(destin));
+      assertFalse(
+          source.equals(destin), "Source and destin datanode should be different");
 
       // Mock FsDatasetSpi#getPinning to show that the block is pinned.
       for (int i = 0; i < cluster.getDataNodes().size(); i++) {
@@ -255,10 +252,10 @@
         InternalDataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
       }
 
-      // Block movement to a different datanode should fail as the block is
-      // pinned.
-      assertTrue("Status code mismatches!", replaceBlock(b, source, source,
-          destin, StorageType.ARCHIVE, Status.ERROR_BLOCK_PINNED));
+      // Block movement to a different datanode should fail as the block is
+      // pinned.
+      assertTrue(replaceBlock(b, source, source,
+          destin, StorageType.ARCHIVE, Status.ERROR_BLOCK_PINNED), "Status code mismatches!");
     } finally {
       cluster.shutdown();
     }
@@ -299,10 +296,10 @@
       locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
       // get the current 
       locatedBlock = locatedBlocks.get(0);
-      assertEquals("Storage should be only one", 1,
-          locatedBlock.getLocations().length);
-      assertTrue("Block should be moved to ARCHIVE", locatedBlock
-          .getStorageTypes()[0] == StorageType.ARCHIVE);
+      assertEquals(1,
+          locatedBlock.getLocations().length, "Storage should be only one");
+      assertTrue(locatedBlock
+          .getStorageTypes()[0] == StorageType.ARCHIVE, "Block should be moved to ARCHIVE");
     } finally {
       cluster.shutdown();
     }
@@ -398,14 +395,14 @@
     DFSClient client = null;
     try {
       cluster.waitActive();
-      assertEquals("Number of namenodes is not 2", 2,
-          cluster.getNumNameNodes());
+      assertEquals(2,
+          cluster.getNumNameNodes(), "Number of namenodes is not 2");
       // Transitioning the namenode 0 to active.
       cluster.transitionToActive(0);
-      assertTrue("Namenode 0 should be in active state",
-          cluster.getNameNode(0).isActiveState());
-      assertTrue("Namenode 1 should be in standby state",
-          cluster.getNameNode(1).isStandbyState());
+      assertTrue(
+          cluster.getNameNode(0).isActiveState(), "Namenode 0 should be in active state");
+      assertTrue(
+          cluster.getNameNode(1).isStandbyState(), "Namenode 1 should be in standby state");
 
       // Trigger heartbeat to mark DatanodeStorageInfo#heartbeatedSinceFailover
       // to true.
@@ -430,8 +427,8 @@
 
       // add a second datanode to the cluster
       cluster.startDataNodes(conf, 1, true, null, null, null, null);
-      assertEquals("Number of datanodes should be 2", 2,
-          cluster.getDataNodes().size());
+      assertEquals(2,
+          cluster.getDataNodes().size(), "Number of datanodes should be 2");
 
       DataNode dn0 = cluster.getDataNodes().get(0);
       DataNode dn1 = cluster.getDataNodes().get(1);
@@ -464,10 +461,10 @@
       cluster.transitionToStandby(0);
       cluster.transitionToActive(1);
 
-      assertTrue("Namenode 1 should be in active state",
-         cluster.getNameNode(1).isActiveState());
-      assertTrue("Namenode 0 should be in standby state",
-         cluster.getNameNode(0).isStandbyState());
+      assertTrue(
+          cluster.getNameNode(1).isActiveState(), "Namenode 1 should be in active state");
+      assertTrue(
+          cluster.getNameNode(0).isStandbyState(), "Namenode 0 should be in standby state");
       client.close();
 
       // Opening a new client for new active  namenode
@@ -476,8 +473,8 @@
           .getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
 
       assertEquals(1, locatedBlocks1.size());
-      assertEquals("The block should be only on 1 datanode ", 1,
-          locatedBlocks1.get(0).getLocations().length);
+      assertEquals(1,
+          locatedBlocks1.get(0).getLocations().length, "The block should be only on 1 datanode ");
     } finally {
       IOUtils.cleanupWithLogger(null, client);
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
index fdbcb51..ee44330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
@@ -24,10 +24,7 @@
 import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS;
 import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER;
 import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.Closeable;
 import java.io.File;
@@ -62,9 +59,9 @@
 import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.Statistics;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -73,7 +70,7 @@
   public static final Logger LOG =
       LoggerFactory.getLogger(TestBlockScanner.class);
 
-  @Before
+  @BeforeEach
   public void before() {
     BlockScanner.Conf.allowUnitTestSettings = true;
     GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.TRACE);
@@ -220,7 +217,7 @@
             assertEquals(savedBlock, loadedBlock);
           }
           boolean blockRemoved = blocks.remove(block);
-          assertTrue("Found unknown block " + block, blockRemoved);
+          assertTrue(blockRemoved, "Found unknown block " + block);
           if (blocksProcessed > (numFiles / 3)) {
             if (!testedSave) {
               LOG.info("Processed {} blocks out of {}.  Saving iterator.",
@@ -284,7 +281,7 @@
     disableBlockScanner(conf);
     TestContext ctx = new TestContext(conf, 1);
     try {
-      Assert.assertFalse(ctx.datanode.getBlockScanner().isEnabled());
+      Assertions.assertFalse(ctx.datanode.getBlockScanner().isEnabled());
     } finally {
       ctx.close();
     }
@@ -488,10 +485,10 @@
       // Should scan no more than one block a second.
       long seconds = ((endMs + 999 - startMs) / 1000);
       long maxBlocksScanned = seconds * 1;
-      assertTrue("The number of blocks scanned is too large.  Scanned " +
-          info.blocksScanned + " blocks; only expected to scan at most " +
-          maxBlocksScanned + " in " + seconds + " seconds.",
-          info.blocksScanned <= maxBlocksScanned);
+      assertTrue(
+          info.blocksScanned <= maxBlocksScanned, "The number of blocks scanned is too large.  Scanned " +
+          info.blocksScanned + " blocks; only expected to scan at most " +
+          maxBlocksScanned + " in " + seconds + " seconds.");
     }
     ctx.close();
   }
@@ -573,8 +570,8 @@
     URI vURI = ctx.volumes.get(0).getStorageLocation().getUri();
     File cursorPath = new File(new File(new File(new File(vURI), "current"),
           ctx.bpids[0]), "scanner.cursor");
-    assertTrue("Failed to find cursor save file in " +
-        cursorPath.getAbsolutePath(), cursorPath.exists());
+    assertTrue(cursorPath.exists(), "Failed to find cursor save file in " +
+        cursorPath.getAbsolutePath());
     Set<ExtendedBlock> prevGoodBlocks = new HashSet<ExtendedBlock>();
     synchronized (info) {
       info.sem = new Semaphore(4);
@@ -687,35 +684,35 @@
     arr.add("3");
     arr.add("5");
     arr.add("7");
-    Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
-    Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
-    Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
-    Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
-    Assert.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
+    Assertions.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
+    Assertions.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
+    Assertions.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
+    Assertions.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
+    Assertions.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
   }
 
   @Test(timeout=120000)
   public void testCalculateNeededBytesPerSec() throws Exception {
     // If we didn't check anything the last hour, we should scan now.
-    Assert.assertTrue(
+    Assertions.assertTrue(
         VolumeScanner.calculateShouldScan("test", 100, 0, 0, 60));
 
     // If, on average, we checked 101 bytes/s checked during the last hour,
     // stop checking now.
-    Assert.assertFalse(VolumeScanner.
+    Assertions.assertFalse(VolumeScanner.
         calculateShouldScan("test", 100, 101 * 3600, 1000, 5000));
 
     // Target is 1 byte / s, but we didn't scan anything in the last minute.
     // Should scan now.
-    Assert.assertTrue(VolumeScanner.
+    Assertions.assertTrue(VolumeScanner.
         calculateShouldScan("test", 1, 3540, 0, 60));
 
     // Target is 1000000 byte / s, but we didn't scan anything in the last
     // minute.  Should scan now.
-    Assert.assertTrue(VolumeScanner.
+    Assertions.assertTrue(VolumeScanner.
         calculateShouldScan("test", 100000L, 354000000L, 0, 60));
 
-    Assert.assertFalse(VolumeScanner.
+    Assertions.assertFalse(VolumeScanner.
         calculateShouldScan("test", 100000L, 365000000L, 0, 60));
   }
 
@@ -760,10 +757,10 @@
     }, 50, 30000);
     // We should have scanned 4 blocks
     synchronized (info) {
-      assertEquals("Expected 4 good blocks.", 4, info.goodBlocks.size());
+      assertEquals(4, info.goodBlocks.size(), "Expected 4 good blocks.");
       info.goodBlocks.clear();
-      assertEquals("Expected 4 blocksScanned", 4, info.blocksScanned);
-      assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
+      assertEquals(4, info.blocksScanned, "Expected 4 blocksScanned");
+      assertEquals(0, info.badBlocks.size(), "Did not expect bad blocks.");
       info.blocksScanned = 0;
     }
     ExtendedBlock first = ctx.getFileBlock(0, 0);
@@ -793,11 +790,11 @@
     }, 50, 30000);
 
     synchronized (info) {
-      assertTrue("Expected block " + first + " to have been scanned.",
-          info.goodBlocks.contains(first));
+      assertTrue(
+          info.goodBlocks.contains(first), "Expected block " + first + " to have been scanned.");
       assertEquals(2, info.goodBlocks.size());
       info.goodBlocks.clear();
-      assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
+      assertEquals(0, info.badBlocks.size(), "Did not expect bad blocks.");
       assertEquals(2, info.blocksScanned);
       info.blocksScanned = 0;
     }
@@ -825,12 +822,12 @@
       assertEquals(5, info.goodBlocks.size());
       assertEquals(0, info.badBlocks.size());
       assertEquals(5, info.blocksScanned);
-      // We should not have rescanned the "suspect block",
-      // because it was recently rescanned by the suspect block system.
-      // This is a test of the "suspect block" rate limiting.
-      Assert.assertFalse("We should not " +
-          "have rescanned block " + first + ", because it should have been " +
-          "in recentSuspectBlocks.", info.goodBlocks.contains(first));
+      // We should not have rescanned the "suspect block",
+      // because it was recently rescanned by the suspect block system.
+      // This is a test of the "suspect block" rate limiting.
+      Assertions.assertFalse(info.goodBlocks.contains(first), "We should not " +
+          "have rescanned block " + first + ", because it should have been " +
+          "in recentSuspectBlocks.");
       info.blocksScanned = 0;
     }
     ctx.close();
@@ -883,10 +880,10 @@
     synchronized (info) {
       assertFalse(info.goodBlocks.contains(unreachableBlock));
       assertFalse(info.badBlocks.contains(unreachableBlock));
-      assertEquals("Expected 3 good blocks.", 3, info.goodBlocks.size());
+      assertEquals(3, info.goodBlocks.size(), "Expected 3 good blocks.");
       info.goodBlocks.clear();
-      assertEquals("Expected 3 blocksScanned", 3, info.blocksScanned);
-      assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
+      assertEquals(3, info.blocksScanned, "Expected 3 blocksScanned");
+      assertEquals(0, info.badBlocks.size(), "Did not expect bad blocks.");
       info.blocksScanned = 0;
     }
     info.sem.release(1);
@@ -979,12 +976,12 @@
     }, 1000, 30000);
 
     synchronized (info) {
-      assertEquals("Expected 1 good block.",
-          numExpectedBlocks, info.goodBlocks.size());
+      assertEquals(
+          numExpectedBlocks, info.goodBlocks.size(), "Expected 1 good block.");
       info.goodBlocks.clear();
-      assertEquals("Expected 1 blocksScanned",
-          numExpectedBlocks, info.blocksScanned);
-      assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
+      assertEquals(
+          numExpectedBlocks, info.blocksScanned, "Expected 1 blocksScanned");
+      assertEquals(0, info.badBlocks.size(), "Did not expect bad blocks.");
       info.blocksScanned = 0;
     }
   }
@@ -1020,8 +1017,8 @@
       info.shouldRun = false;
       info.notify();
     }
-    assertEquals("Should not scan block accessed in last period",
-        0, info.blocksScanned);
+    assertEquals(
+        0, info.blocksScanned, "Should not scan block accessed in last period");
     ctx.close();
   }
 
@@ -1092,18 +1089,18 @@
       long totalTimeShutdown = endShutdownTime - startShutdownTime;
 
       if (isFastShutdown) {
-        assertTrue("total shutdown time of DN must be smaller than "
-                + "VolumeScanner Response time: " + totalTimeShutdown,
-            totalTimeShutdown < delayMS
-                && totalTimeShutdown >= joinTimeOutMS);
+        assertTrue(
+            totalTimeShutdown < delayMS
+                && totalTimeShutdown >= joinTimeOutMS, "total shutdown time of DN must be smaller than "
+            + "VolumeScanner Response time: " + totalTimeShutdown);
         // wait for scanners to terminate before we move to the next test.
         injectDelay.waitForScanners();
         return;
       }
-      assertTrue("total shutdown time of DN must be larger than " +
-              "VolumeScanner Response time: " + totalTimeShutdown,
-          totalTimeShutdown >= delayMS
-              && totalTimeShutdown < joinTimeOutMS);
+      assertTrue(
+          totalTimeShutdown >= delayMS
+              && totalTimeShutdown < joinTimeOutMS, "total shutdown time of DN must be larger than " +
+          "VolumeScanner Response time: " + totalTimeShutdown);
     } finally {
       // restore the VolumeScanner callback injector.
       VolumeScannerCBInjector.set(prevVolumeScannerCBInject);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
index b07e9e4..17bcbfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
@@ -22,7 +22,7 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.Scheduler;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.util.Arrays;
@@ -30,11 +30,9 @@
 import java.util.Random;
 
 import static java.lang.Math.abs;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
index 011df46..e4a67c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
@@ -43,9 +43,9 @@
 
 import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
 
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestCachingStrategy {
   private static final Logger LOG =
@@ -56,7 +56,7 @@
   private final static TestRecordingCacheTracker tracker =
       new TestRecordingCacheTracker();
 
-  @BeforeClass
+  @BeforeAll
   public static void setupTest() {
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
 
@@ -239,7 +239,7 @@
       // read file
       readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, true);
       // verify that we dropped everything from the cache.
-      Assert.assertNotNull(stats);
+      Assertions.assertNotNull(stats);
       stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
     } finally {
       if (cluster != null) {
@@ -284,7 +284,7 @@
       // read file
       readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, null);
       // verify that we dropped everything from the cache.
-      Assert.assertNotNull(stats);
+      Assertions.assertNotNull(stats);
       stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
     } finally {
       if (cluster != null) {
@@ -361,7 +361,7 @@
           TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
       String fadvisedFileName = cluster.getBlockFile(0, block).getName();
       Stats stats = tracker.getStats(fadvisedFileName);
-      Assert.assertNull(stats);
+      Assertions.assertNull(stats);
       
       // read file
       readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, false);
@@ -388,7 +388,7 @@
       createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
       // verify that we can seek after setDropBehind
       try (FSDataInputStream fis = fs.open(new Path(TEST_PATH))) {
-        Assert.assertTrue(fis.read() != -1); // create BlockReader
+        Assertions.assertTrue(fis.read() != -1); // create BlockReader
         fis.setDropBehind(false); // clear BlockReader
         fis.seek(2); // seek
       }
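
@BeforeClass maps to @BeforeAll in TestCachingStrategy above; as in JUnit 4, the annotated method has to be static unless the class opts into a per-class test lifecycle. A minimal sketch with an invented flag in place of the edit-log fsync switch:

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    // Illustration only.
    class BeforeAllSketch {
      private static boolean fsyncSkipped;

      @BeforeAll    // was @BeforeClass; still runs once and must be static
      static void setupTest() {
        fsyncSkipped = true;
      }

      @Test
      void classLevelSetupRanBeforeThisTest() {
        Assertions.assertTrue(fsyncSkipped);
      }
    }
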
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java
index a71dbdb..878b333 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCorruptMetadataFile.java
@@ -28,14 +28,14 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.File;
 import java.io.RandomAccessFile;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Tests to ensure that a block is not read successfully from a datanode
@@ -47,7 +47,7 @@
   private MiniDFSCluster.Builder clusterBuilder;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     // Reduce block acquire retries as we only have 1 DN and it allows the
@@ -57,7 +57,7 @@
     clusterBuilder = new MiniDFSCluster.Builder(conf).numDataNodes(1);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java
index 8587f9d..7242362 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDNUsageReport.java
@@ -21,10 +21,10 @@
 
 import org.apache.hadoop.hdfs.server.protocol.DataNodeUsageReport;
 import org.apache.hadoop.hdfs.server.protocol.DataNodeUsageReportUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test class for {@link DataNodeUsageReport}.
@@ -40,12 +40,12 @@
   private long readBlock;
   private long timeSinceLastReport;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     dnUsageUtil = new DataNodeUsageReportUtil();
   }
 
-  @After
+  @AfterEach
   public void clear() throws IOException {
     dnUsageUtil = null;
   }
@@ -60,7 +60,7 @@
     // Test1
     DataNodeUsageReport report = dnUsageUtil.getUsageReport(0,
         0, 0, 0, 0, 0, 0);
-    Assert.assertEquals(report, DataNodeUsageReport.EMPTY_REPORT);
+    Assertions.assertEquals(report, DataNodeUsageReport.EMPTY_REPORT);
 
     // Test2
     bytesWritten = 200;
@@ -74,22 +74,22 @@
         bytesRead, writeTime, readTime, writeBlock, readBlock,
         timeSinceLastReport);
 
-    Assert.assertEquals(bytesWritten / timeSinceLastReport,
+    Assertions.assertEquals(bytesWritten / timeSinceLastReport,
         report.getBytesWrittenPerSec());
-    Assert.assertEquals(bytesRead / timeSinceLastReport,
+    Assertions.assertEquals(bytesRead / timeSinceLastReport,
         report.getBytesReadPerSec());
-    Assert.assertEquals(writeTime, report.getWriteTime());
-    Assert.assertEquals(readTime, report.getReadTime());
-    Assert.assertEquals(writeBlock / timeSinceLastReport,
+    Assertions.assertEquals(writeTime, report.getWriteTime());
+    Assertions.assertEquals(readTime, report.getReadTime());
+    Assertions.assertEquals(writeBlock / timeSinceLastReport,
         report.getBlocksWrittenPerSec());
-    Assert.assertEquals(readBlock / timeSinceLastReport,
+    Assertions.assertEquals(readBlock / timeSinceLastReport,
         report.getBlocksReadPerSec());
 
     // Test3
     DataNodeUsageReport report2 = dnUsageUtil.getUsageReport(bytesWritten,
         bytesRead, writeTime, readTime, writeBlock, readBlock,
         0);
-    Assert.assertEquals(report, report2);
+    Assertions.assertEquals(report, report2);
 
     // Test4
     long bytesWritten2 = 50000;
@@ -103,15 +103,15 @@
         bytesRead2, writeTime2, readTime2, writeBlock2, readBlock2,
         timeSinceLastReport);
 
-    Assert.assertEquals((bytesWritten2 - bytesWritten) / timeSinceLastReport,
+    Assertions.assertEquals((bytesWritten2 - bytesWritten) / timeSinceLastReport,
         report2.getBytesWrittenPerSec());
-    Assert.assertEquals((bytesRead2 - bytesRead) / timeSinceLastReport,
+    Assertions.assertEquals((bytesRead2 - bytesRead) / timeSinceLastReport,
         report2.getBytesReadPerSec());
-    Assert.assertEquals(writeTime2 - writeTime, report2.getWriteTime());
-    Assert.assertEquals(readTime2 - readTime, report2.getReadTime());
-    Assert.assertEquals((writeBlock2 - writeBlock) / timeSinceLastReport,
+    Assertions.assertEquals(writeTime2 - writeTime, report2.getWriteTime());
+    Assertions.assertEquals(readTime2 - readTime, report2.getReadTime());
+    Assertions.assertEquals((writeBlock2 - writeBlock) / timeSinceLastReport,
         report2.getBlocksWrittenPerSec());
-    Assert.assertEquals((readBlock2 - readBlock) / timeSinceLastReport,
+    Assertions.assertEquals((readBlock2 - readBlock) / timeSinceLastReport,
         report2.getBlocksReadPerSec());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
index c06d389..7c4698b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
@@ -32,7 +32,8 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestDataDirs {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
index cb727e2..ebaf3fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
@@ -21,9 +21,9 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
@@ -41,7 +41,7 @@
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN();
-      Assert.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
+      Assertions.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
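
Files such as TestBpServiceActorScheduler and TestDataNodeECN above switch @Test to the Jupiter annotation while still declaring a JUnit 4 Timeout @Rule; the Jupiter engine does not run JUnit 4 rules on its own, so per-test or per-class timeouts are normally expressed with Jupiter's @Timeout instead. A sketch of that replacement under that assumption; the class and test body are invented:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // Illustration only: roughly what a 300-second Timeout rule maps to.
    @Timeout(value = 300, unit = TimeUnit.SECONDS)
    class TimeoutMigrationSketch {

      @Test
      @Timeout(2)   // per-test override, seconds by default
      void heartbeatIsDueImmediatelyAfterStart() {
        long monotonicNow = 0L;
        long nextHeartbeatTime = 0L;
        Assertions.assertTrue(nextHeartbeatTime <= monotonicNow);
      }
    }
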
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
index ce65b6b..c416bc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
@@ -38,13 +38,14 @@
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounterWithoutCheck;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+
 import java.io.IOException;
 
 /**
@@ -67,7 +68,7 @@
   private Configuration conf;
   private DistributedFileSystem fs;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -81,7 +82,7 @@
         StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -90,26 +91,26 @@
 
   @Test(timeout = 120000)
   public void testFullBlock() throws Exception {
-    Assert.assertEquals(0, getLongMetric("EcReconstructionReadTimeMillis"));
-    Assert.assertEquals(0, getLongMetric("EcReconstructionDecodingTimeMillis"));
-    Assert.assertEquals(0, getLongMetric("EcReconstructionWriteTimeMillis"));
+    Assertions.assertEquals(0, getLongMetric("EcReconstructionReadTimeMillis"));
+    Assertions.assertEquals(0, getLongMetric("EcReconstructionDecodingTimeMillis"));
+    Assertions.assertEquals(0, getLongMetric("EcReconstructionWriteTimeMillis"));
 
     doTest("/testEcMetrics", blockGroupSize, 0);
 
-    Assert.assertEquals("EcReconstructionTasks should be ",
-        1, getLongMetric("EcReconstructionTasks"));
-    Assert.assertEquals("EcFailedReconstructionTasks should be ",
-        0, getLongMetric("EcFailedReconstructionTasks"));
-    Assert.assertTrue(getLongMetric("EcDecodingTimeNanos") > 0);
-    Assert.assertEquals("EcReconstructionBytesRead should be ",
-        blockGroupSize, getLongMetric("EcReconstructionBytesRead"));
-    Assert.assertEquals("EcReconstructionBytesWritten should be ",
-        blockSize, getLongMetric("EcReconstructionBytesWritten"));
-    Assert.assertEquals("EcReconstructionRemoteBytesRead should be ",
-        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"));
-    Assert.assertTrue(getLongMetric("EcReconstructionReadTimeMillis") > 0);
-    Assert.assertTrue(getLongMetric("EcReconstructionDecodingTimeMillis") > 0);
-    Assert.assertTrue(getLongMetric("EcReconstructionWriteTimeMillis") > 0);
+    Assertions.assertEquals(
+        1, getLongMetric("EcReconstructionTasks"), "EcReconstructionTasks should be ");
+    Assertions.assertEquals(
+        0, getLongMetric("EcFailedReconstructionTasks"), "EcFailedReconstructionTasks should be ");
+    Assertions.assertTrue(getLongMetric("EcDecodingTimeNanos") > 0);
+    Assertions.assertEquals(
+        blockGroupSize, getLongMetric("EcReconstructionBytesRead"), "EcReconstructionBytesRead should be ");
+    Assertions.assertEquals(
+        blockSize, getLongMetric("EcReconstructionBytesWritten"), "EcReconstructionBytesWritten should be ");
+    Assertions.assertEquals(
+        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be ");
+    Assertions.assertTrue(getLongMetric("EcReconstructionReadTimeMillis") > 0);
+    Assertions.assertTrue(getLongMetric("EcReconstructionDecodingTimeMillis") > 0);
+    Assertions.assertTrue(getLongMetric("EcReconstructionWriteTimeMillis") > 0);
   }
 
   // A partial block, reconstruct the partial block
@@ -118,12 +119,12 @@
     final int fileLen = blockSize / 10;
     doTest("/testEcBytes", fileLen, 0);
 
-    Assert.assertEquals("EcReconstructionBytesRead should be ",
-        fileLen,  getLongMetric("EcReconstructionBytesRead"));
-    Assert.assertEquals("EcReconstructionBytesWritten should be ",
-        fileLen, getLongMetric("EcReconstructionBytesWritten"));
-    Assert.assertEquals("EcReconstructionRemoteBytesRead should be ",
-        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"));
+    Assertions.assertEquals(
+        fileLen, getLongMetric("EcReconstructionBytesRead"), "EcReconstructionBytesRead should be ");
+    Assertions.assertEquals(
+        fileLen, getLongMetric("EcReconstructionBytesWritten"), "EcReconstructionBytesWritten should be ");
+    Assertions.assertEquals(
+        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be ");
   }
 
   // 1 full block + 5 partial block, reconstruct the full block
@@ -132,13 +133,13 @@
     final int fileLen = cellSize * dataBlocks + cellSize + cellSize / 10;
     doTest("/testEcBytes", fileLen, 0);
 
-    Assert.assertEquals("ecReconstructionBytesRead should be ",
-        cellSize * dataBlocks + cellSize + cellSize / 10,
-        getLongMetric("EcReconstructionBytesRead"));
-    Assert.assertEquals("EcReconstructionBytesWritten should be ",
-        blockSize, getLongMetric("EcReconstructionBytesWritten"));
-    Assert.assertEquals("EcReconstructionRemoteBytesRead should be ",
-        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"));
+    Assertions.assertEquals(
+        cellSize * dataBlocks + cellSize + cellSize / 10,
+        getLongMetric("EcReconstructionBytesRead"), "ecReconstructionBytesRead should be ");
+    Assertions.assertEquals(
+        blockSize, getLongMetric("EcReconstructionBytesWritten"), "EcReconstructionBytesWritten should be ");
+    Assertions.assertEquals(
+        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be ");
   }
 
   // 1 full block + 5 partial block, reconstruct the partial block
@@ -147,14 +148,14 @@
     final int fileLen = cellSize * dataBlocks + cellSize + cellSize / 10;
     doTest("/testEcBytes", fileLen, 1);
 
-    Assert.assertEquals("ecReconstructionBytesRead should be ",
-        cellSize * dataBlocks + (cellSize / 10) * 2 ,
-        getLongMetric("EcReconstructionBytesRead"));
-    Assert.assertEquals("ecReconstructionBytesWritten should be ",
-        cellSize + cellSize / 10,
-        getLongMetric("EcReconstructionBytesWritten"));
-    Assert.assertEquals("EcReconstructionRemoteBytesRead should be ",
-        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"));
+    Assertions.assertEquals(
+        cellSize * dataBlocks + (cellSize / 10) * 2,
+        getLongMetric("EcReconstructionBytesRead"), "ecReconstructionBytesRead should be ");
+    Assertions.assertEquals(
+        cellSize + cellSize / 10,
+        getLongMetric("EcReconstructionBytesWritten"), "ecReconstructionBytesWritten should be ");
+    Assertions.assertEquals(
+        0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"), "EcReconstructionRemoteBytesRead should be ");
   }
 
   private long getLongMetric(String metricName) {
@@ -195,14 +196,14 @@
     final DataNode toCorruptDn = cluster.getDataNode(
         lastBlock.getLocations()[deadNodeIndex].getIpcPort());
     LOG.info("Datanode to be corrupted: " + toCorruptDn);
-    assertNotNull("Failed to find a datanode to be corrupted", toCorruptDn);
+    assertNotNull(toCorruptDn, "Failed to find a datanode to be corrupted");
     toCorruptDn.shutdown();
     setDataNodeDead(toCorruptDn.getDatanodeId());
     DFSTestUtil.waitForDatanodeState(cluster, toCorruptDn.getDatanodeUuid(),
         false, 10000);
 
     final int workCount = getComputedDatanodeWork();
-    assertTrue("Wrongly computed block reconstruction work", workCount > 0);
+    assertTrue(workCount > 0, "Wrongly computed block reconstruction work");
     cluster.triggerHeartbeats();
     int totalBlocks =  (fileLen / blockGroupSize) * groupSize;
     final int remainder = fileLen % blockGroupSize;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
index 8c2fe37..d84fcd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
@@ -18,10 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -31,9 +28,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 /** 
@@ -44,7 +41,7 @@
   Configuration conf;
   MiniDFSCluster cluster = null;
   
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
@@ -57,7 +54,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -78,8 +75,8 @@
       Thread.sleep(WAIT_TIME_IN_MILLIS);
       iterations--;
     }
-    assertEquals("Mismatch in number of BPServices running", expected,
-        dn.getBpOsCount());
+    assertEquals(expected,
+        dn.getBpOsCount(), "Mismatch in number of BPServices running");
   }
 
   @Test
@@ -100,9 +97,9 @@
   public void testBPServiceExit() throws Exception {
     DataNode dn = cluster.getDataNodes().get(0);
     stopBPServiceThreads(1, dn);
-    assertTrue("DataNode should not exit", dn.isDatanodeUp());
+    assertTrue(dn.isDatanodeUp(), "DataNode should not exit");
     stopBPServiceThreads(2, dn);
-    assertFalse("DataNode should exit", dn.isDatanodeUp());
+    assertFalse(dn.isDatanodeUp(), "DataNode should exit");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
index 27f5f02..b4e6ef5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
@@ -25,12 +25,12 @@
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.Set;
 import java.util.TreeSet;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class TestDataNodeFSDataSetSink {
   private static final MetricsSystemImpl ms = new
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
index 6879dc0..cca3adf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -161,8 +161,8 @@
       }
       LOG.info("delay info: " + mdnFaultInjector.getDelayMs() + ":"
           + datanodeSlowLogThresholdMs);
-      assertTrue("Injected delay should be longer than the configured one",
-          mdnFaultInjector.getDelayMs() > datanodeSlowLogThresholdMs);
+      assertTrue(
+          mdnFaultInjector.getDelayMs() > datanodeSlowLogThresholdMs, "Injected delay should be longer than the configured one");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 098d8a4..163ca9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -51,10 +51,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
 import java.io.File;
@@ -86,13 +85,9 @@
 import static org.hamcrest.CoreMatchers.anyOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doAnswer;
@@ -106,7 +101,7 @@
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @After
+  @AfterEach
   public void tearDown() {
     shutdown();
   }
@@ -370,8 +365,8 @@
     };
     Collections.sort(expectedStorageLocations, comparator);
     Collections.sort(effectiveStorageLocations, comparator);
-    assertEquals("Effective volumes doesnt match expected",
-        expectedStorageLocations, effectiveStorageLocations);
+    assertEquals(expectedStorageLocations, effectiveStorageLocations,
+        "Effective volumes doesnt match expected");
 
     // Check that all newly created volumes are appropriately formatted.
     for (File volumeDir : newVolumeDirs) {
@@ -626,10 +621,10 @@
     }
     listStorageThread.join();
 
-    // Verify errors while adding volumes and listing storage directories
-    Assert.assertEquals("Error adding volumes!", false, addVolumeError.get());
-    Assert.assertEquals("Error listing storage!",
-        false, listStorageError.get());
+    // Verify errors while adding volumes and listing storage directories
+    Assertions.assertEquals(false, addVolumeError.get(), "Error adding volumes!");
+    Assertions.assertEquals(false, listStorageError.get(),
+        "Error listing storage!");
 
     int additionalBlockCount = 9;
     int totalBlockCount = initialBlockCount + additionalBlockCount;
@@ -968,8 +963,8 @@
         System.out.println("Vol: " +
             fsVolumeReferences.get(i).getBaseURI().toString());
       }
-      assertEquals("Volume remove wasn't successful.",
-          1, fsVolumeReferences.size());
+      assertEquals(1, fsVolumeReferences.size(),
+          "Volume remove wasn't successful.");
     }
 
     // Verify the file has sufficient replications.
@@ -990,8 +985,8 @@
 
     try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
         .getFsVolumeReferences()) {
-      assertEquals("Volume remove wasn't successful.",
-          1, fsVolumeReferences.size());
+      assertEquals(1, fsVolumeReferences.size(),
+          "Volume remove wasn't successful.");
       FsVolumeSpi volume = fsVolumeReferences.get(0);
       String bpid = cluster.getNamesystem().getBlockPoolId();
       FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
@@ -1000,8 +995,8 @@
         blkIter.nextBlock();
         blockCount++;
       }
-      assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
-          dataNodeIdx), blockCount > 1);
+      assertTrue(blockCount > 1, String.format(
+          "DataNode(%d) should have more than 1 blocks", dataNodeIdx));
     }
   }
 
@@ -1065,8 +1060,8 @@
     File dirToFail = cluster.getInstanceStorageDir(0, 0);
 
     FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
-    assertTrue("No FsVolume was found for " + dirToFail,
-        failedVolume != null);
+    assertTrue(failedVolume != null,
+        "No FsVolume was found for " + dirToFail);
     long used = failedVolume.getDfsUsed();
 
     DataNodeTestUtils.injectDataDirFailure(dirToFail);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
index df8be22..4719870 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
@@ -30,7 +30,6 @@
 import org.junit.Test;
 
 import static org.hamcrest.core.IsNot.not;
-import static org.junit.Assert.*;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
index 44f9069..8e6f409 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
@@ -28,9 +28,7 @@
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyInt;
@@ -57,10 +55,10 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import org.mockito.invocation.InvocationOnMock;
@@ -96,7 +94,7 @@
   private DataNode dn;
   private BPServiceActor bpsa;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     // Configure cluster with lifeline RPC server enabled, and down-tune
     // heartbeat timings to try to force quick dead/stale DataNodes.
@@ -143,7 +141,7 @@
     bpsa.setNameNode(namenode);
   }
 
-  @After
+  @AfterEach
   public void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -189,12 +187,12 @@
     // poll DataNode tracking information.  Thanks to the lifeline, we expect
     // that the DataNode always stays alive, and never goes stale or dead.
     while (!lifelinesSent.await(1, SECONDS)) {
-      assertEquals("Expect DataNode to be kept alive by lifeline.", 1,
-          namesystem.getNumLiveDataNodes());
-      assertEquals("Expect DataNode not marked dead due to lifeline.", 0,
-          namesystem.getNumDeadDataNodes());
-      assertEquals("Expect DataNode not marked stale due to lifeline.", 0,
-          namesystem.getNumStaleDataNodes());
+      assertEquals(1, namesystem.getNumLiveDataNodes(),
+          "Expect DataNode to be kept alive by lifeline.");
+      assertEquals(0, namesystem.getNumDeadDataNodes(),
+          "Expect DataNode not marked dead due to lifeline.");
+      assertEquals(0, namesystem.getNumStaleDataNodes(),
+          "Expect DataNode not marked stale due to lifeline.");
       // add a new volume on the next heartbeat
       cluster.getDataNodes().get(0).reconfigurePropertyImpl(
           DFS_DATANODE_DATA_DIR_KEY,
@@ -212,13 +210,13 @@
         anyInt(),
         any());
 
-    // Also verify lifeline call through metrics.  We expect at least
-    // numLifelines, guaranteed by waiting on the latch.  There is a small
-    // possibility of extra lifeline calls depending on timing, so we allow
-    // slack in the assertion.
-    assertTrue("Expect metrics to count at least " + numLifelines + " calls.",
-        getLongCounter("LifelinesNumOps", getMetrics(metrics.name())) >=
-            numLifelines);
+    // Also verify lifeline call through metrics.  We expect at least
+    // numLifelines, guaranteed by waiting on the latch.  There is a small
+    // possibility of extra lifeline calls depending on timing, so we allow
+    // slack in the assertion.
+    assertTrue(getLongCounter("LifelinesNumOps",
+        getMetrics(metrics.name())) >= numLifelines,
+        "Expect metrics to count at least " + numLifelines + " calls.");
   }
 
   @Test
@@ -246,12 +244,12 @@
     // poll DataNode tracking information.  We expect that the DataNode always
     // stays alive, and never goes stale or dead.
     while (!heartbeatsSent.await(1, SECONDS)) {
-      assertEquals("Expect DataNode to be kept alive by lifeline.", 1,
-          namesystem.getNumLiveDataNodes());
-      assertEquals("Expect DataNode not marked dead due to lifeline.", 0,
-          namesystem.getNumDeadDataNodes());
-      assertEquals("Expect DataNode not marked stale due to lifeline.", 0,
-          namesystem.getNumStaleDataNodes());
+      assertEquals(1, namesystem.getNumLiveDataNodes(),
+          "Expect DataNode to be kept alive by lifeline.");
+      assertEquals(0, namesystem.getNumDeadDataNodes(),
+          "Expect DataNode not marked dead due to lifeline.");
+      assertEquals(0, namesystem.getNumStaleDataNodes(),
+          "Expect DataNode not marked stale due to lifeline.");
     }
 
     // Verify that we did not call the lifeline RPC.
@@ -265,9 +263,9 @@
         anyInt(),
         any());
 
-    // Also verify no lifeline calls through metrics.
-    assertEquals("Expect metrics to count no lifeline calls.", 0,
-        getLongCounter("LifelinesNumOps", getMetrics(metrics.name())));
+    // Also verify no lifeline calls through metrics.
+    assertEquals(0, getLongCounter("LifelinesNumOps", getMetrics(metrics.name())),
+        "Expect metrics to count no lifeline calls.");
   }
 
   @Test
@@ -276,11 +274,11 @@
     assertTrue(initialCapacity > 0);
     dn.setHeartbeatsDisabledForTests(true);
     cluster.setDataNodesDead();
-    assertEquals("Capacity should be 0 after all DNs dead", 0, cluster
-        .getNamesystem(0).getCapacityTotal());
+    assertEquals(0, cluster.getNamesystem(0).getCapacityTotal(),
+        "Capacity should be 0 after all DNs dead");
     bpsa.sendLifelineForTests();
-    assertEquals("Lifeline should be ignored for dead node", 0, cluster
-        .getNamesystem(0).getCapacityTotal());
+    assertEquals(0, cluster.getNamesystem(0).getCapacityTotal(),
+        "Lifeline should be ignored for dead node");
     // Wait for re-registration and heartbeat
     dn.setHeartbeatsDisabledForTests(false);
     final DatanodeDescriptor dnDesc = cluster.getNamesystem(0).getBlockManager()
@@ -292,8 +290,8 @@
         return dnDesc.isAlive() && dnDesc.isHeartbeatedSinceRegistration();
       }
     }, 100, 5000);
-    assertEquals("Capacity should include only live capacity", initialCapacity,
-        cluster.getNamesystem(0).getCapacityTotal());
+    assertEquals(initialCapacity, cluster.getNamesystem(0).getCapacityTotal(),
+        "Capacity should include only live capacity");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 73f664b..b035561 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -41,12 +41,12 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Class for testing {@link DataNodeMXBean} implementation
@@ -63,7 +63,7 @@
 
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      Assertions.assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
@@ -71,49 +71,49 @@
           "Hadoop:service=DataNode,name=DataNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
-      Assert.assertEquals(datanode.getClusterId(), clusterId);
+      Assertions.assertEquals(datanode.getClusterId(), clusterId);
       // get attribute "Version"
       String version = (String)mbs.getAttribute(mxbeanName, "Version");
-      Assert.assertEquals(datanode.getVersion(),version);
+      Assertions.assertEquals(datanode.getVersion(),version);
       // get attribute "SotfwareVersion"
       String softwareVersion =
           (String)mbs.getAttribute(mxbeanName, "SoftwareVersion");
-      Assert.assertEquals(datanode.getSoftwareVersion(),softwareVersion);
-      Assert.assertEquals(version, softwareVersion
+      Assertions.assertEquals(datanode.getSoftwareVersion(),softwareVersion);
+      Assertions.assertEquals(version, softwareVersion
           + ", r" + datanode.getRevision());
       // get attribute "RpcPort"
       String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
-      Assert.assertEquals(datanode.getRpcPort(),rpcPort);
+      Assertions.assertEquals(datanode.getRpcPort(),rpcPort);
       // get attribute "HttpPort"
       String httpPort = (String)mbs.getAttribute(mxbeanName, "HttpPort");
-      Assert.assertEquals(datanode.getHttpPort(),httpPort);
+      Assertions.assertEquals(datanode.getHttpPort(),httpPort);
       // get attribute "NamenodeAddresses"
       String namenodeAddresses = (String)mbs.getAttribute(mxbeanName, 
           "NamenodeAddresses");
-      Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
+      Assertions.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
       // get attribute "getDatanodeHostname"
       String datanodeHostname = (String)mbs.getAttribute(mxbeanName,
           "DatanodeHostname");
-      Assert.assertEquals(datanode.getDatanodeHostname(),datanodeHostname);
+      Assertions.assertEquals(datanode.getDatanodeHostname(),datanodeHostname);
       // get attribute "getVolumeInfo"
       String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
-      Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
+      Assertions.assertEquals(replaceDigits(datanode.getVolumeInfo()),
           replaceDigits(volumeInfo));
       // Ensure mxbean's XceiverCount is same as the DataNode's
       // live value.
       int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
           "XceiverCount");
-      Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
+      Assertions.assertEquals(datanode.getXceiverCount(), xceiverCount);
       // Ensure mxbean's XmitsInProgress is same as the DataNode's
       // live value.
       int xmitsInProgress =
           (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
-      Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
+      Assertions.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
           "BPServiceActorInfo");
-      Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
+      Assertions.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
       String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
-      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
+      Assertions.assertEquals(datanode.getSlowDisks(), slowDisks);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -130,7 +130,7 @@
     try (MiniDFSCluster cluster =
                  new MiniDFSCluster.Builder(simpleConf).build()) {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      Assertions.assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
@@ -139,15 +139,15 @@
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertFalse(securityEnabled);
-      Assert.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
+      Assertions.assertFalse(securityEnabled);
+      Assertions.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
     }
 
     // get attribute "SecurityEnabled" with secure configuration
     try (MiniDFSCluster cluster =
                  new MiniDFSCluster.Builder(secureConf).build()) {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      Assertions.assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
@@ -156,8 +156,8 @@
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertTrue(securityEnabled);
-      Assert.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
+      Assertions.assertTrue(securityEnabled);
+      Assertions.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
     }
 
     // setting back the authentication method
@@ -186,7 +186,7 @@
           "Hadoop:service=DataNode,name=DataNodeInfo");
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
           "BPServiceActorInfo");
-      Assert.assertEquals(dn.getBPServiceActorInfo(), bpActorInfo);
+      Assertions.assertEquals(dn.getBPServiceActorInfo(), bpActorInfo);
       LOG.info("bpActorInfo is " + bpActorInfo);
       TypeReference<ArrayList<Map<String, String>>> typeRef
           = new TypeReference<ArrayList<Map<String, String>>>() {};
@@ -201,12 +201,12 @@
           Integer.valueOf(bpActorInfoList.get(0).get("maxBlockReportSize"));
       LOG.info("maxDataLength is " + maxDataLength);
       LOG.info("maxBlockReportSize is " + maxBlockReportSize);
-      assertTrue("maxBlockReportSize should be greater than zero",
-          maxBlockReportSize > 0);
-      assertEquals("maxDataLength should be exactly "
-          + "the same value of ipc.maximum.data.length",
-          confMaxDataLength,
-          maxDataLength);
+      assertTrue(maxBlockReportSize > 0,
+          "maxBlockReportSize should be greater than zero");
+      assertEquals(confMaxDataLength,
+          maxDataLength,
+          "maxDataLength should be exactly "
+          + "the same value of ipc.maximum.data.length");
     }
   }
 
@@ -227,10 +227,10 @@
         DFSTestUtil.createFile(fs, new Path("/tmp.txt" + i), 1024, (short) 1,
                 1L);
       }
-      assertEquals("Before restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+      assertEquals(5, getTotalNumBlocks(mbs, mxbeanName), "Before restart DN");
       cluster.restartDataNode(0);
       cluster.waitActive();
-      assertEquals("After restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+      assertEquals(5, getTotalNumBlocks(mbs, mxbeanName), "After restart DN");
       fs.delete(new Path("/tmp.txt1"), true);
       // The total numBlocks should be updated after one file is deleted
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -274,7 +274,7 @@
 
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      Assertions.assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
       String slowDiskPath = "test/data1/slowVolume";
       datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath, null);
@@ -284,8 +284,8 @@
           "Hadoop:service=DataNode,name=DataNodeInfo");
 
       String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
-      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
-      Assert.assertTrue(slowDisks.contains(slowDiskPath));
+      Assertions.assertEquals(datanode.getSlowDisks(), slowDisks);
+      Assertions.assertTrue(slowDisks.contains(slowDiskPath));
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 602ac00..75f74f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -22,7 +22,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.Closeable;
 import java.io.FileNotFoundException;
@@ -89,8 +89,8 @@
       DataNode datanode = datanodes.get(0);
       MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
       assertCounter("BytesWritten", LONG_FILE_LEN, rb);
-      assertTrue("Expected non-zero number of incremental block reports",
-          getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
+      assertTrue(getLongCounter("IncrementalBlockReportsNumOps", rb) > 0,
+          "Expected non-zero number of incremental block reports");
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
@@ -196,10 +196,10 @@
       List<DataNode> datanodes = cluster.getDataNodes();
       DataNode datanode = datanodes.get(0);
       MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
-      assertTrue("More than 1 packet received",
-          getLongCounter("TotalPacketsReceived", dnMetrics) > 1L);
-      assertTrue("More than 1 slow packet to mirror",
-          getLongCounter("TotalPacketsSlowWriteToMirror", dnMetrics) > 1L);
+      assertTrue(getLongCounter("TotalPacketsReceived", dnMetrics) > 1L,
+          "More than 1 packet received");
+      assertTrue(getLongCounter("TotalPacketsSlowWriteToMirror", dnMetrics) > 1L,
+          "More than 1 slow packet to mirror");
       assertCounter("TotalPacketsSlowWriteToDisk", 1L, dnMetrics);
       assertCounter("TotalPacketsSlowWriteToOsCache", 0L, dnMetrics);
     } finally {
@@ -293,15 +293,15 @@
           break;
         }
       }
-      assertNotNull("Could not find the head of the datanode write pipeline", 
-          headNode);
+      assertNotNull(headNode,
+          "Could not find the head of the datanode write pipeline");
       // Close the file and wait for the metrics to rollover
       Thread.sleep((interval + 1) * 1000);
       // Check the ack was received
       MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
           .name());
-      assertTrue("Expected non-zero number of acks", 
-          getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
+      assertTrue(getLongCounter("PacketAckRoundTripTimeNanosNumOps",
+          dnMetrics) > 0, "Expected non-zero number of acks");
       assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
           + "s", dnMetrics);
     } finally {
@@ -346,10 +346,10 @@
       final Object dnc =
           mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
       final String allDnc = dnc.toString();
-      assertTrue("expected to see loopback address",
-          allDnc.indexOf("127.0.0.1") >= 0);
-      assertTrue("expected to see networkErrors",
-          allDnc.indexOf("networkErrors") >= 0);
+      assertTrue(allDnc.indexOf("127.0.0.1") >= 0,
+          "expected to see loopback address");
+      assertTrue(allDnc.indexOf("networkErrors") >= 0,
+          "expected to see networkErrors");
     } finally {
       IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       if (cluster != null) {
@@ -420,7 +420,7 @@
 
       MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
       long blocksReplicated = getLongCounter("BlocksReplicated", rb);
-      assertEquals("No blocks replicated yet", 0, blocksReplicated);
+      assertEquals(0, blocksReplicated, "No blocks replicated yet");
 
       Path path = new Path("/counter.txt");
       DFSTestUtil.createFile(fs, path, 1024, (short) 2, Time.monotonicNow());
@@ -430,7 +430,7 @@
 
       MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
       blocksReplicated = getLongCounter("BlocksReplicated", rbNew);
-      assertEquals("blocks replicated counter incremented", 1, blocksReplicated);
+      assertEquals(1, blocksReplicated, "blocks replicated counter incremented");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -550,8 +550,8 @@
                 CachingStrategy.newDefaultStrategy());
         fail("Must throw FileNotFoundException");
       } catch (FileNotFoundException fe) {
-        assertTrue("Should throw too many open files",
-                fe.getMessage().contains("Too many open files"));
+        assertTrue(fe.getMessage().contains("Too many open files"),
+            "Should throw too many open files");
       }
       cluster.triggerHeartbeats(); // IBR delete ack
       //After DN throws too many open files
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 8443c36..d3a2571 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -48,10 +46,10 @@
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.AsyncAppender;
 import org.apache.log4j.spi.LoggingEvent;
-import org.junit.After;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.util.function.Supplier;
@@ -100,7 +98,7 @@
    * @throws IOException
    *           if an error occurred
    */
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (dn != null) {
       try {
@@ -110,8 +108,8 @@
       } finally {
         File dir = new File(DATA_DIR);
         if (dir.exists())
-          Assert.assertTrue("Cannot delete data-node dirs",
-              FileUtil.fullyDelete(dir));
+          Assertions.assertTrue(FileUtil.fullyDelete(dir),
+              "Cannot delete data-node dirs");
       }
     }
     dn = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 5167042..f1cbb2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -18,10 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -44,16 +41,16 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestDataNodeMultipleRegistrations {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestDataNodeMultipleRegistrations.class);
   Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
   }
@@ -73,8 +70,8 @@
       cluster.waitActive();
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn2 = cluster.getNameNode(1);
-      assertNotNull("cannot create nn1", nn1);
-      assertNotNull("cannot create nn2", nn2);
+      assertNotNull(nn1, "cannot create nn1");
+      assertNotNull(nn2, "cannot create nn2");
 
       String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
       String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
@@ -84,7 +81,7 @@
       int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
       int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
       int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
-      assertNotSame("namespace ids should be different", ns1, ns2);
+      assertNotSame(ns1, ns2, "namespace ids should be different");
       LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
           + nn1.getNameNodeAddress());
       LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri="
@@ -93,15 +90,15 @@
       // check number of volumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
       final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
-      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
+      Assertions.assertTrue(volInfos.size() > 0, "No volumes in the fsdataset");
       int i = 0;
       for (Map.Entry<String, Object> e : volInfos.entrySet()) {
         LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
       }
-      // number of volumes should be 2 - [data1, data2]
-      assertEquals("number of volumes is wrong",
-          cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(),
-          volInfos.size());
+      // number of volumes should be 2 - [data1, data2]
+      assertEquals(
+          cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(),
+          volInfos.size(), "number of volumes is wrong");
 
       for (BPOfferService bpos : dn.getAllBpOs()) {
         LOG.info("BP: " + bpos);
@@ -117,18 +114,18 @@
         bpos2 = tmp;
       }
 
-      assertEquals("wrong nn address", getNNSocketAddress(bpos1),
-          nn1.getNameNodeAddress());
-      assertEquals("wrong nn address", getNNSocketAddress(bpos2),
-          nn2.getNameNodeAddress());
-      assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
-      assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
-      assertEquals("wrong cid", dn.getClusterId(), cid1);
-      assertEquals("cid should be same", cid2, cid1);
-      assertEquals("namespace should be same",
-          bpos1.bpNSInfo.namespaceID, ns1);
-      assertEquals("namespace should be same",
-          bpos2.bpNSInfo.namespaceID, ns2);
+      assertEquals(getNNSocketAddress(bpos1),
+          nn1.getNameNodeAddress(), "wrong nn address");
+      assertEquals(getNNSocketAddress(bpos2),
+          nn2.getNameNodeAddress(), "wrong nn address");
+      assertEquals(bpos1.getBlockPoolId(), bpid1, "wrong bpid");
+      assertEquals(bpos2.getBlockPoolId(), bpid2, "wrong bpid");
+      assertEquals(dn.getClusterId(), cid1, "wrong cid");
+      assertEquals(cid2, cid1, "cid should be same");
+      assertEquals(bpos1.bpNSInfo.namespaceID, ns1,
+          "namespace should be same");
+      assertEquals(bpos2.bpNSInfo.namespaceID, ns2,
+          "namespace should be same");
     } finally {
       cluster.shutdown();
     }
@@ -151,7 +148,7 @@
         .nameNodePort(9927).build();
     try {
       NameNode nn1 = cluster.getNameNode();
-      assertNotNull("cannot create nn1", nn1);
+      assertNotNull(nn1, "cannot create nn1");
 
       String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
       String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
@@ -162,15 +159,15 @@
       // check number of vlumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
       final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
-      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
+      Assertions.assertTrue(volInfos.size() > 0, "No volumes in the fsdataset");
       int i = 0;
       for (Map.Entry<String, Object> e : volInfos.entrySet()) {
         LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
       }
-      // number of volumes should be 2 - [data1, data2]
-      assertEquals("number of volumes is wrong",
-          cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(),
-          volInfos.size());
+      // number of volumes should be 2 - [data1, data2]
+      assertEquals(
+          cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(),
+          volInfos.size(), "number of volumes is wrong");
 
       for (BPOfferService bpos : dn.getAllBpOs()) {
         LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
@@ -182,11 +179,11 @@
       BPOfferService bpos1 = dn.getAllBpOs().get(0);
       bpos1.triggerBlockReportForTests();
 
-      assertEquals("wrong nn address",
-          getNNSocketAddress(bpos1),
-          nn1.getNameNodeAddress());
-      assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
-      assertEquals("wrong cid", dn.getClusterId(), cid1);
+      assertEquals(
+          getNNSocketAddress(bpos1),
+          nn1.getNameNodeAddress(), "wrong nn address");
+      assertEquals(bpos1.getBlockPoolId(), bpid1, "wrong bpid");
+      assertEquals(dn.getClusterId(), cid1, "wrong cid");
       cluster.shutdown();
       
       // Ensure all the BPOfferService threads are shutdown
@@ -210,25 +207,25 @@
       DataNode dn = cluster.getDataNodes().get(0);
       List<BPOfferService> bposs = dn.getAllBpOs();
       LOG.info("dn bpos len (should be 2):" + bposs.size());
-      Assert.assertEquals("should've registered with two namenodes", bposs.size(),2);
+      Assertions.assertEquals(bposs.size(), 2, "should've registered with two namenodes");
       
       // add another namenode
       cluster.addNameNode(conf, 9938);
       Thread.sleep(500);// lets wait for the registration to happen
       bposs = dn.getAllBpOs(); 
       LOG.info("dn bpos len (should be 3):" + bposs.size());
-      Assert.assertEquals("should've registered with three namenodes", bposs.size(),3);
+      Assertions.assertEquals(bposs.size(), 3, "should've registered with three namenodes");
       
       // change cluster id and another Namenode
       StartupOption.FORMAT.setClusterId("DifferentCID");
       cluster.addNameNode(conf, 9948);
       NameNode nn4 = cluster.getNameNode(3);
-      assertNotNull("cannot create nn4", nn4);
+      assertNotNull(nn4, "cannot create nn4");
 
       Thread.sleep(500);// lets wait for the registration to happen
       bposs = dn.getAllBpOs(); 
       LOG.info("dn bpos len (still should be 3):" + bposs.size());
-      Assert.assertEquals("should've registered with three namenodes", 3, bposs.size());
+      Assertions.assertEquals(3, bposs.size(), "should've registered with three namenodes");
     } finally {
         cluster.shutdown();
     }
@@ -254,9 +251,9 @@
       // let the initialization be complete
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
-      assertTrue("Datanode should be running", dn.isDatanodeUp());
-      assertEquals("Only one BPOfferService should be running", 1,
-          dn.getAllBpOs().size());
+      assertTrue(dn.isDatanodeUp(), "Datanode should be running");
+      assertEquals(1, dn.getAllBpOs().size(),
+          "Only one BPOfferService should be running");
     } finally {
       cluster.shutdown();
     }
@@ -278,9 +275,9 @@
       // let the initialization be complete
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
-      assertTrue("Datanode should be running", dn.isDatanodeUp());
-      assertEquals("BPOfferService should be running", 1,
-          dn.getAllBpOs().size());
+      assertTrue(dn.isDatanodeUp(), "Datanode should be running");
+      assertEquals(1, dn.getAllBpOs().size(),
+          "BPOfferService should be running");
       DataNodeProperties dnProp = cluster.stopDataNode(0);
 
       cluster.getNameNode(0).stop();
@@ -328,12 +325,12 @@
     // add a node
     try {
       cluster.waitActive();
-      Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+      Assertions.assertEquals(2, cluster.getNumNameNodes(), "(1)Should be 2 namenodes");
 
       cluster.addNameNode(conf, 0);
-      Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
+      Assertions.assertEquals(3, cluster.getNumNameNodes(), "(1)Should be 3 namenodes");
     } catch (IOException ioe) {
-      Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+      Assertions.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
     } finally {
       cluster.shutdown();
     }
@@ -345,15 +342,15 @@
       .build();
     
     try {
-      Assert.assertNotNull(cluster);
+      Assertions.assertNotNull(cluster);
       cluster.waitActive();
-      Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+      Assertions.assertEquals(1, cluster.getNumNameNodes(), "(2)Should be 1 namenodes");
     
       // add a node
       cluster.addNameNode(conf, 0);
-      Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+      Assertions.assertEquals(2, cluster.getNumNameNodes(), "(2)Should be 2 namenodes");
     } catch (IOException ioe) {
-      Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+      Assertions.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
     } finally {
       cluster.shutdown();
     }
@@ -365,15 +362,15 @@
     // add a node
     try {
       cluster.waitActive();
-      Assert.assertNotNull(cluster);
-      Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+      Assertions.assertNotNull(cluster);
+      Assertions.assertEquals(1, cluster.getNumNameNodes(), "(2)Should be 1 namenodes");
 
       cluster.addNameNode(conf, 9929);
-      Assert.fail("shouldn't be able to add another NN to non federated cluster");
+      Assertions.fail("shouldn't be able to add another NN to non federated cluster");
     } catch (IOException e) {
       // correct 
-      Assert.assertTrue(e.getMessage().startsWith("cannot add namenode"));
-      Assert.assertEquals("(3)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+      Assertions.assertTrue(e.getMessage().startsWith("cannot add namenode"));
+      Assertions.assertEquals(1, cluster.getNumNameNodes(), "(3)Should be 1 namenodes");
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java
index 41fb41f..f3d3b90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodePeerMetrics.java
@@ -34,8 +34,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PEER_METRICS_MIN_OUTLIER_DETECTION_SAMPLES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY;
 import static org.hamcrest.CoreMatchers.containsString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * This class tests various cases of DataNode peer metrics.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 8cbd38b..0a504d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -20,9 +20,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -36,10 +34,10 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test to reconfigure some parameters for DataNode without restart
@@ -54,12 +52,12 @@
   private final int NUM_DATA_NODE = 10;
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void Setup() throws IOException {
     startDFSCluster(NUM_NAME_NODE, NUM_DATA_NODE);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -68,8 +66,8 @@
 
     File dir = new File(DATA_DIR);
     if (dir.exists())
-      Assert.assertTrue("Cannot delete data-node dirs",
-          FileUtil.fullyDelete(dir));
+      Assertions.assertTrue(FileUtil.fullyDelete(dir),
+          "Cannot delete data-node dirs");
   }
 
   private void startDFSCluster(int numNameNodes, int numDataNodes)
@@ -117,8 +115,8 @@
             DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, "text");
         fail("ReconfigurationException expected");
       } catch (ReconfigurationException expected) {
-        assertTrue("expecting NumberFormatException",
-            expected.getCause() instanceof NumberFormatException);
+        assertTrue(expected.getCause() instanceof NumberFormatException,
+            "expecting NumberFormatException");
       }
       try {
         dn.reconfigureProperty(
@@ -126,8 +124,8 @@
             String.valueOf(-1));
         fail("ReconfigurationException expected");
       } catch (ReconfigurationException expected) {
-        assertTrue("expecting IllegalArgumentException",
-            expected.getCause() instanceof IllegalArgumentException);
+        assertTrue(expected.getCause() instanceof IllegalArgumentException,
+            "expecting IllegalArgumentException");
       }
       try {
         dn.reconfigureProperty(
@@ -135,37 +133,37 @@
             String.valueOf(0));
         fail("ReconfigurationException expected");
       } catch (ReconfigurationException expected) {
-        assertTrue("expecting IllegalArgumentException",
-            expected.getCause() instanceof IllegalArgumentException);
+        assertTrue(expected.getCause() instanceof IllegalArgumentException,
+            "expecting IllegalArgumentException");
       }
 
       // change properties
       dn.reconfigureProperty(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
           String.valueOf(maxConcurrentMovers));
 
-      // verify change
-      assertEquals(String.format("%s has wrong value",
-          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY),
-          maxConcurrentMovers, dn.xserver.balanceThrottler.getMaxConcurrentMovers());
+      // verify change
+      assertEquals(maxConcurrentMovers, dn.xserver.balanceThrottler
+          .getMaxConcurrentMovers(), String.format("%s has wrong value",
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
 
-      assertEquals(String.format("%s has wrong value",
-          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY),
-          maxConcurrentMovers, Integer.parseInt(dn.getConf().get(
-              DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY)));
+      assertEquals(maxConcurrentMovers, Integer.parseInt(dn.getConf().get(
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY)),
+          String.format("%s has wrong value",
+              DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
 
       // revert to default
       dn.reconfigureProperty(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
           null);
 
-      // verify default
-      assertEquals(String.format("%s has wrong value",
-          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY),
-          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT,
-          dn.xserver.balanceThrottler.getMaxConcurrentMovers());
+      // verify default
+      assertEquals(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT,
+          dn.xserver.balanceThrottler.getMaxConcurrentMovers(),
+          String.format("%s has wrong value",
+              DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
 
-      assertEquals(String.format("expect %s is not configured",
-          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY), null, dn
-          .getConf().get(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
+      assertEquals(null, dn.getConf().get(
+          DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY), String.format(
+          "expect %s is not configured", DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY));
     }
   }
 
@@ -216,7 +214,7 @@
       // Attempt to set new maximum to 1
       final boolean success =
           dataNode.xserver.updateBalancerMaxConcurrentMovers(1);
-      Assert.assertFalse(success);
+      Assertions.assertFalse(success);
     } finally {
       dataNode.shutdown();
     }
@@ -244,9 +242,9 @@
       dataNode.reconfigurePropertyImpl(
           DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, "1");
     } catch (ReconfigurationException e) {
-      Assert.assertEquals(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+      Assertions.assertEquals(DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
           e.getProperty());
-      Assert.assertEquals("1", e.getNewValue());
+      Assertions.assertEquals("1", e.getNewValue());
       throw e;
     } finally {
       dataNode.shutdown();
@@ -263,12 +261,12 @@
     /** Test that the default setup is working */
 
     for (int i = 0; i < defaultMaxThreads; i++) {
-      assertEquals("should be able to get thread quota", true,
-          dataNode.xserver.balanceThrottler.acquire());
+      assertEquals(true, dataNode.xserver.balanceThrottler.acquire(),
+          "should be able to get thread quota");
     }
 
-    assertEquals("should not be able to get thread quota", false,
-        dataNode.xserver.balanceThrottler.acquire());
+    assertEquals(false, dataNode.xserver.balanceThrottler.acquire(),
+        "should not be able to get thread quota");
 
     // Give back the threads
     for (int i = 0; i < defaultMaxThreads; i++) {
@@ -282,15 +280,15 @@
         DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
         String.valueOf(maxConcurrentMovers));
 
-    assertEquals("thread quota is wrong", maxConcurrentMovers,
-        dataNode.xserver.balanceThrottler.getMaxConcurrentMovers());
+    assertEquals(maxConcurrentMovers, dataNode.xserver.balanceThrottler
+        .getMaxConcurrentMovers(), "thread quota is wrong");
 
     for (int i = 0; i < maxConcurrentMovers; i++) {
-      assertEquals("should be able to get thread quota", true,
-          dataNode.xserver.balanceThrottler.acquire());
+      assertEquals(true, dataNode.xserver.balanceThrottler.acquire(),
+          "should be able to get thread quota");
     }
 
-    assertEquals("should not be able to get thread quota", false,
-        dataNode.xserver.balanceThrottler.acquire());
+    assertEquals(false, dataNode.xserver.balanceThrottler.acquire(),
+        "should not be able to get thread quota");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
index b5af880..d7d7dc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -27,6 +27,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -107,8 +109,8 @@
   private File getBlockForFile(Path path, boolean exists) throws IOException {
     LocatedBlocks blocks = nn.getRpcServer().getBlockLocations(path.toString(),
         0, Long.MAX_VALUE);
-    assertEquals("The test helper functions assume that each file has a single block",
-                 1, blocks.getLocatedBlocks().size());
+    assertEquals(1, blocks.getLocatedBlocks().size(),
+        "The test helper functions assume that each file has a single block");
     ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock();
     BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block);
     File blockFile = new File(bInfo.getBlockPath());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
index ead7baa..2b1040a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,9 +34,9 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StandardSocketFactory;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import javax.net.SocketFactory;
 import java.io.IOException;
@@ -59,12 +59,12 @@
       LoggerFactory.getLogger(TestDataNodeTcpNoDelay.class);
   private static Configuration baseConf;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpBeforeClass() throws Exception {
     baseConf = new HdfsConfiguration();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownAfterClass() throws Exception {
 
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java
index 0e98b86..c4b240b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.List;
 
@@ -26,7 +26,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestDataNodeTransferSocketSize {
 
@@ -40,8 +40,8 @@
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
       DataNode datanode = datanodes.get(0);
-      assertEquals("Receive buffer size should be 4K",
-        4 * 1024, datanode.getXferServer().getPeerServer().getReceiveBufferSize());
+      assertEquals(4 * 1024, datanode.getXferServer().getPeerServer()
+          .getReceiveBufferSize(), "Receive buffer size should be 4K");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -59,9 +59,9 @@
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
       DataNode datanode = datanodes.get(0);
-      assertTrue(
-        "Receive buffer size should be a default value (determined by kernel)",
-        datanode.getXferServer().getPeerServer().getReceiveBufferSize() > 0);
+      assertTrue(
+          datanode.getXferServer().getPeerServer().getReceiveBufferSize() > 0,
+          "Receive buffer size should be a default value (determined by kernel)");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
index 80cc7d5..89359b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
@@ -32,9 +32,7 @@
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestDataNodeUUID {
 
@@ -96,19 +94,19 @@
       // on the second disk
       MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
       FileUtils.deleteDirectory(disk2);
-      assertTrue("Failed to recreate the data directory: " + disk2,
-              disk2.mkdirs());
+      assertTrue(disk2.mkdirs(),
+          "Failed to recreate the data directory: " + disk2);
 
-      // Restart and check if the UUID changed
-      assertTrue("DataNode failed to start up: " + dn,
-              cluster.restartDataNode(dn));
+      // Restart and check if the UUID changed
+      assertTrue(cluster.restartDataNode(dn),
+          "DataNode failed to start up: " + dn);
       // We need to wait until the DN has completed registration
       while (!cluster.getDataNodes().get(0).isDatanodeFullyStarted()) {
         Thread.sleep(50);
       }
-      assertEquals(
-              "DN generated a new UUID despite disk1 having it intact",
-              originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid());
+      assertEquals(originalUUID,
+          cluster.getDataNodes().get(0).getDatanodeUuid(),
+          "DN generated a new UUID despite disk1 having it intact");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 6b5faee..703b167 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -20,13 +20,9 @@
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -83,10 +79,10 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -122,7 +118,7 @@
   @Rule
   public Timeout timeout = new Timeout(120 * 1000);
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // bring up a cluster of 2
     conf = new HdfsConfiguration();
@@ -138,7 +134,7 @@
     dataDir = new File(cluster.getDataDirectory());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if(data_fail != null) {
       FileUtil.setWritable(data_fail, true);
@@ -552,8 +548,8 @@
     assumeNotWindows();
 
     failedDir = new File(dataDir, "failedDir");
-    assertTrue("Failed to fail a volume by setting it non-writable",
-        failedDir.mkdir() && failedDir.setReadOnly());
+    assertTrue(failedDir.mkdir() && failedDir.setReadOnly(),
+        "Failed to fail a volume by setting it non-writable");
 
     startNewDataNodeWithDiskFailure(new File(failedDir, "newDir1"), false);
   }
@@ -569,8 +565,8 @@
     assumeNotWindows();
 
     failedDir = new File(dataDir, "failedDir");
-    assertTrue("Failed to fail a volume by setting it non-writable",
-        failedDir.mkdir() && failedDir.setReadOnly());
+    assertTrue(failedDir.mkdir() && failedDir.setReadOnly(),
+        "Failed to fail a volume by setting it non-writable");
 
     startNewDataNodeWithDiskFailure(new File(failedDir, "newDir1"), true);
   }
@@ -584,8 +580,8 @@
     assumeNotWindows();
 
     final File readOnlyDir = new File(dataDir, "nonWritable");
-    assertTrue("Set the data dir permission non-writable",
-        readOnlyDir.mkdir() && readOnlyDir.setReadOnly());
+    assertTrue(readOnlyDir.mkdir() && readOnlyDir.setReadOnly(),
+        "Set the data dir permission non-writable");
 
     startNewDataNodeWithDiskFailure(new File(readOnlyDir, "newDir1"), false);
   }
@@ -600,8 +596,8 @@
     assumeNotWindows();
 
     final File readOnlyDir = new File(dataDir, "nonWritable");
-    assertTrue("Set the data dir permission non-writable",
-        readOnlyDir.mkdir() && readOnlyDir.setReadOnly());
+    assertTrue(readOnlyDir.mkdir() && readOnlyDir.setReadOnly(),
+        "Set the data dir permission non-writable");
     startNewDataNodeWithDiskFailure(new File(readOnlyDir, "newDir1"), true);
   }
 
@@ -624,9 +620,9 @@
 
     try {
       cluster.startDataNodes(newConf, 1, false, null, null);
-      assertTrue("Failed to get expected IOException", tolerated);
+      assertTrue(tolerated, "Failed to get expected IOException");
     } catch (IOException ioe) {
-      assertFalse("Unexpected IOException " + ioe, tolerated);
+      assertFalse(tolerated, "Unexpected IOException " + ioe);
       return;
     }
 
@@ -659,14 +655,14 @@
 
     for(String bid : block_map.keySet()) {
       BlockLocs bl = block_map.get(bid);
-      // System.out.println(bid + "->" + bl.num_files + "vs." + bl.num_locs);
-      // number of physical files (1 or 2) should be same as number of datanodes
-      // in the list of the block locations
-      assertEquals("Num files should match num locations",
-          bl.num_files, bl.num_locs);
+      // System.out.println(bid + "->" + bl.num_files + "vs." + bl.num_locs);
+      // number of physical files (1 or 2) should be same as number of datanodes
+      // in the list of the block locations
+      assertEquals(bl.num_files, bl.num_locs,
+          "Num files should match num locations");
     }
-    assertEquals("Num physical blocks should match num stored in the NN",
-        totalReal, totalNN);
+    assertEquals(totalReal, totalNN,
+        "Num physical blocks should match num stored in the NN");
 
     // now check the number of under-replicated blocks
     FSNamesystem fsn = cluster.getNamesystem();
@@ -682,9 +678,9 @@
     System.out.println("total blocks (real and replicating):" + 
         (totalReal + totalRepl) + " vs. all files blocks " + blocks_num*2);
 
-    // together all the blocks should be equal to all real + all underreplicated
-    assertEquals("Incorrect total block count",
-        totalReal + totalRepl, blocks_num * repl);
+    // together all the blocks should be equal to all real + all underreplicated
+    assertEquals(totalReal + totalRepl, blocks_num * repl,
+        "Incorrect total block count");
   }
   
   /**
@@ -843,8 +839,8 @@
         //int ii = 0;
         for(File f: res) {
           String s = f.getName();
-          // cut off "blk_-" at the beginning and ".meta" at the end
-          assertNotNull("Block file name should not be null", s);
+          // cut off "blk_-" at the beginning and ".meta" at the end
+          assertNotNull(s, "Block file name should not be null");
           String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_"));
           //System.out.println(ii++ + ". block " + s + "; id=" + bid);
           BlockLocs val = map.get(bid);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 6011d6e..70bfae1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -20,12 +20,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.lang.management.ManagementFactory;
@@ -56,11 +51,11 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 /**
@@ -92,7 +87,7 @@
   @Rule
   public Timeout timeout = new Timeout(120 * 1000);
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // These tests use DataNodeTestUtils#injectDataDirFailure() to simulate
     // volume failures which is currently not supported on Windows.
@@ -101,7 +96,7 @@
     initCluster(1, 2, 1);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     IOUtils.cleanupWithLogger(LOG, fs);
     if (cluster != null) {
@@ -156,9 +151,9 @@
     DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short)3);
     ArrayList<DataNode> dns = cluster.getDataNodes();
-    assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-    assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
-    assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
+    assertTrue(dns.get(0).isDatanodeUp(), "DN1 should be up");
+    assertTrue(dns.get(1).isDatanodeUp(), "DN2 should be up");
+    assertTrue(dns.get(2).isDatanodeUp(), "DN3 should be up");
 
     /*
      * The metrics should confirm the volume failures.
@@ -186,7 +181,7 @@
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
-    assertTrue("DN3 should still be up", dns.get(2).isDatanodeUp());
+    assertTrue(dns.get(2).isDatanodeUp(), "DN3 should still be up");
     checkFailuresAtDataNode(dns.get(2), 1, true, dn3Vol1.getAbsolutePath());
 
     DataNodeTestUtils.triggerHeartbeat(dns.get(2));
@@ -338,9 +333,9 @@
     DFSTestUtil.waitReplication(fs, file2, (short)3);
 
     ArrayList<DataNode> dns = cluster.getDataNodes();
-    assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-    assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
-    assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
+    assertTrue(dns.get(0).isDatanodeUp(), "DN1 should be up");
+    assertTrue(dns.get(1).isDatanodeUp(), "DN2 should be up");
+    assertTrue(dns.get(2).isDatanodeUp(), "DN3 should be up");
 
     checkFailuresAtDataNode(dns.get(0), 1, true, dn1Vol1.getAbsolutePath(),
         dn1Vol2.getAbsolutePath());
@@ -387,9 +382,9 @@
     DFSTestUtil.waitReplication(fs, file1, (short)2);
 
     ArrayList<DataNode> dns = cluster.getDataNodes();
-    assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-    assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
-    assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
+    assertTrue(dns.get(0).isDatanodeUp(), "DN1 should be up");
+    assertTrue(dns.get(1).isDatanodeUp(), "DN2 should be up");
+    assertTrue(dns.get(2).isDatanodeUp(), "DN3 should be up");
 
     checkFailuresAtDataNode(dns.get(0), 1, true, dn1Vol1.getAbsolutePath());
     checkFailuresAtDataNode(dns.get(1), 1, true, dn2Vol1.getAbsolutePath());
@@ -478,8 +473,8 @@
     cluster.waitActive();
     ArrayList<DataNode> dns = cluster.getDataNodes();
     DataNode dn = dns.get(0);
-    assertFalse("DataNode should not reformat if VERSION is missing",
-        currentVersion.exists());
+    assertFalse(currentVersion.exists(),
+        "DataNode should not reformat if VERSION is missing");
 
     // Make sure DN's JMX sees the failed volume
     final String[] expectedFailedVolumes = {dn1Vol1.getAbsolutePath()};
@@ -516,8 +511,8 @@
     assertTrue(cluster.restartDataNodes(true));
     // the DN should tolerate one volume failure.
     cluster.waitActive();
-    assertFalse("DataNode should not reformat if VERSION is missing",
-        currentVersion.exists());
+    assertFalse(currentVersion.exists(),
+        "DataNode should not reformat if VERSION is missing");
   }
 
   /**
@@ -538,7 +533,7 @@
         "Hadoop:service=DataNode,name=FSDatasetState-" + dn0.getDatanodeUuid());
     int numFailedVolumes = (int) mbs.getAttribute(mxbeanName,
         "NumFailedVolumes");
-    Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
+    Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
         numFailedVolumes);
     checkFailuresAtDataNode(dn0, 0, false, new String[] {});
 
@@ -548,8 +543,8 @@
     DataNodeTestUtils.waitForDiskError(dn0,
         DataNodeTestUtils.getVolume(dn0, dn0Vol1));
     numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
-    Assert.assertEquals(1, numFailedVolumes);
-    Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
+    Assertions.assertEquals(1, numFailedVolumes);
+    Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
             numFailedVolumes);
     checkFailuresAtDataNode(dn0, 1, true,
         new String[] {dn0Vol1.getAbsolutePath()});
@@ -561,12 +556,12 @@
           oldDataDirs);
       fail("Reconfigure with failed disk should throw exception.");
     } catch (ReconfigurationException e) {
-      Assert.assertTrue("Reconfigure exception doesn't have expected path!",
-          e.getCause().getMessage().contains(dn0Vol1.getAbsolutePath()));
+      Assertions.assertTrue(e.getCause().getMessage().contains(dn0Vol1.getAbsolutePath()),
+          "Reconfigure exception doesn't have expected path!");
     }
     numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
-    Assert.assertEquals(1, numFailedVolumes);
-    Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
+    Assertions.assertEquals(1, numFailedVolumes);
+    Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
         numFailedVolumes);
     checkFailuresAtDataNode(dn0, 1, true,
         new String[] {dn0Vol1.getAbsolutePath()});
@@ -577,8 +572,8 @@
     dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
             dataDirs);
     numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
-    Assert.assertEquals(0, numFailedVolumes);
-    Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
+    Assertions.assertEquals(0, numFailedVolumes);
+    Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
             numFailedVolumes);
     checkFailuresAtDataNode(dn0, 0, true, new String[] {});
 
@@ -588,8 +583,8 @@
     dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
             oldDataDirs);
     numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
-    Assert.assertEquals(0, numFailedVolumes);
-    Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
+    Assertions.assertEquals(0, numFailedVolumes);
+    Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
         numFailedVolumes);
     checkFailuresAtDataNode(dn0, 0, true, new String[] {});
 
@@ -599,8 +594,8 @@
     DataNodeTestUtils.waitForDiskError(dn0,
         DataNodeTestUtils.getVolume(dn0, dn0Vol2));
     numFailedVolumes = (int) mbs.getAttribute(mxbeanName, "NumFailedVolumes");
-    Assert.assertEquals(1, numFailedVolumes);
-    Assert.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
+    Assertions.assertEquals(1, numFailedVolumes);
+    Assertions.assertEquals(dn0.getFSDataset().getNumFailedVolumes(),
         numFailedVolumes);
     checkFailuresAtDataNode(dn0, 1, true,
         new String[] {dn0Vol2.getAbsolutePath()});
@@ -658,9 +653,9 @@
     LOG.info(strBuilder.toString());
     final long actualVolumeFailures =
         getLongCounter("VolumeFailures", getMetrics(dn.getMetrics().name()));
-    assertTrue("Actual async detected volume failures should be greater or " +
-        "equal than " + expectedFailedVolumes,
-        actualVolumeFailures >= expectedVolumeFailuresCounter);
+    assertTrue(actualVolumeFailures >= expectedVolumeFailuresCounter,
+        "Actual async detected volume failures should be greater or " +
+        "equal than " + expectedFailedVolumes);
     assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
     assertArrayEquals(expectedFailedVolumes,
         convertToAbsolutePaths(fsd.getFailedStorageLocations()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 6165f05..26df549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -37,10 +35,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 /**
@@ -63,7 +61,7 @@
   @Rule
   public Timeout timeout = new Timeout(120 * 1000);
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
@@ -81,7 +79,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -122,14 +120,14 @@
     cluster.waitActive();
 
     try {
-      assertTrue("The DN should have started up fine.",
-          cluster.isDataNodeUp());
+      assertTrue(cluster.isDataNodeUp(),
+          "The DN should have started up fine.");
       DataNode dn = cluster.getDataNodes().get(0);
       String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
-      assertTrue("The DN should have started with this directory",
-          si.contains(dataDir1Actual.getPath()));
-      assertFalse("The DN shouldn't have a bad directory.",
-          si.contains(dataDir2Actual.getPath()));
+      assertTrue(si.contains(dataDir1Actual.getPath()),
+          "The DN should have started with this directory");
+      assertFalse(si.contains(dataDir2Actual.getPath()),
+          "The DN shouldn't have a bad directory.");
     } finally {
       cluster.shutdownDataNodes();
       FileUtil.chmod(dataDir2.toString(), "755");
@@ -272,8 +270,8 @@
   private void prepareDirToFail(File dir) throws IOException,
       InterruptedException {
     dir.mkdirs();
-    assertEquals("Couldn't chmod local vol", 0,
-        FileUtil.chmod(dir.toString(), "000"));
+    assertEquals(0, FileUtil.chmod(dir.toString(), "000"),
+        "Couldn't chmod local vol");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 85054be..6d27c07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -19,9 +19,7 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -45,7 +43,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 /**
@@ -103,7 +101,7 @@
       }
 
       ArrayList<DataNode> dns = cluster.getDataNodes();
-      assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
+      assertTrue(dns.get(0).isDatanodeUp(), "DN1 should be up");
       final File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
 
       DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java
index f82462a..bbb6be5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java
@@ -26,9 +26,9 @@
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.io.File;
@@ -37,9 +37,7 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestDataStorage {
   private final static String DEFAULT_BPID = "bp-0";
@@ -55,18 +53,18 @@
   private NamespaceInfo nsInfo;
   private DataStorage storage;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
     storage = new DataStorage();
     nsInfo = new NamespaceInfo(0, CLUSTER_ID, DEFAULT_BPID, CTIME,
         BUILD_VERSION, SOFTWARE_VERSION);
     FileUtil.fullyDelete(TEST_DIR);
-    assertTrue("Failed to make test dir.", TEST_DIR.mkdirs());
+    assertTrue(TEST_DIR.mkdirs(), "Failed to make test dir.");
     Mockito.when(mockDN.getConf()).thenReturn(conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     storage.unlockAll();
     FileUtil.fullyDelete(TEST_DIR);
@@ -206,7 +204,7 @@
     // create a fake directory under current/
     File currentDir = new File(sd.getCurrentDir(),
         "BP-787466439-172.26.24.43-1462305406642");
-    assertTrue("unable to mkdir " + currentDir.getName(), currentDir.mkdirs());
+    assertTrue(currentDir.mkdirs(), "unable to mkdir " + currentDir.getName());
 
     // Add volumes for multiple namespaces.
     List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
@@ -214,8 +212,8 @@
       storage.addStorageLocations(mockDN, ni, locations, START_OPT);
     }
 
-    // It should not format the directory because VERSION is missing.
-    assertTrue("Storage directory was formatted", currentDir.exists());
+    // It should not format the directory because VERSION is missing.
+    assertTrue(currentDir.exists(), "Storage directory was formatted");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java
index 43b149a..9f9cdac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java
@@ -36,7 +36,7 @@
 import org.apache.hadoop.util.DataChecksum;
 
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.mockito.ArgumentCaptor;
 
@@ -49,7 +49,7 @@
 import java.net.Socket;
 import java.util.UUID;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
index 611360d..7064249 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java
@@ -30,7 +30,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.mockito.ArgumentCaptor;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index 905cc2a..a5262f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -50,10 +50,10 @@
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -85,7 +85,7 @@
    * Starts an instance of DataNode
    * @throws IOException
    */
-  @Before
+  @BeforeEach
   public void startUp() throws IOException, URISyntaxException {
     tearDownDone = false;
     conf = new HdfsConfiguration();
@@ -107,7 +107,7 @@
    * Cleans the resources and closes the instance of datanode
    * @throws IOException if an error occurred
    */
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (!tearDownDone && dn != null) {
       try {
@@ -117,8 +117,8 @@
       } finally {
         File dir = new File(DATA_DIR);
         if (dir.exists())
-          Assert.assertTrue(
-              "Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
+          Assertions.assertTrue(FileUtil.fullyDelete(dir),
+              "Cannot delete data-node dirs");
       }
       tearDownDone = true;
     }
@@ -222,7 +222,7 @@
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(NN_ADDR, nnAddr);
+        Assertions.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
index 7c1c27b..7bb46a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -43,9 +43,9 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.VersionInfo;
 
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -61,7 +61,7 @@
   NamespaceInfo fakeNsInfo;
   DNConf mockDnConf;
   
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     mockDnConf = mock(DNConf.class);
     doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
@@ -158,7 +158,7 @@
       localActor.stop();
       localActor.register(nsInfo);
     } catch (IOException e) {
-      Assert.assertEquals("DN shut down before block pool registered",
+      Assertions.assertEquals("DN shut down before block pool registered",
           e.getMessage());
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeStartupOptions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeStartupOptions.java
index 1a868fe..1c97bcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeStartupOptions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeStartupOptions.java
@@ -21,11 +21,11 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 
 /**
  * This test verifies DataNode command line processing.
@@ -69,7 +69,7 @@
    * Reinitialize configuration before every test since DN stores the
    * parsed StartupOption in the configuration.
    */
-  @Before
+  @BeforeEach
   public void initConfiguration() {
     conf = new HdfsConfiguration();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
index e3b4267..31cc517 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 
@@ -32,7 +30,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests deleteBlockPool functionality.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 7f79778..f637d28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -21,11 +21,7 @@
 import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -77,8 +73,8 @@
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.SimpleLayout;
 import org.apache.log4j.WriterAppender;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -112,7 +108,7 @@
     return configuration;
   }
 
-  @Before
+  @BeforeEach
   public void setup() {
     LazyPersistTestCase.initCacheManipulator();
   }
@@ -449,10 +445,10 @@
           " for the deleted block";
       String dirStructureWarnLog = " found in invalid directory." +
           "  Expected directory: ";
-      assertFalse("directory check print meaningless warning message",
-          logContent.contains(dirStructureWarnLog));
-      assertTrue("missing block warn log not appear",
-          logContent.contains(missingBlockWarn));
+      assertFalse(logContent.contains(dirStructureWarnLog),
+          "directory check print meaningless warning message");
+      assertTrue(logContent.contains(missingBlockWarn),
+          "missing block warn log not appear");
       LOG.info("check pass");
 
     } finally {
@@ -647,11 +643,11 @@
       scan(totalBlocks + 3, 6, 2, 2, 3, 2);
       scan(totalBlocks + 1, 0, 0, 0, 0, 0);
 
-      // Test14: make sure no throttling is happening
-      assertTrue("Throttle appears to be engaged",
-          scanner.timeWaitingMs.get() < 10L);
-      assertTrue("Report complier threads logged no execution time",
-          scanner.timeRunningMs.get() > 0L);
+      // Test14: make sure no throttling is happening
+      assertTrue(scanner.timeWaitingMs.get() < 10L,
+          "Throttle appears to be engaged");
+      assertTrue(scanner.timeRunningMs.get() > 0L,
+          "Report complier threads logged no execution time");
 
       scanner.shutdown();
       assertFalse(scanner.getRunStatus());
@@ -718,8 +714,8 @@
 
       // Waiting should be about 9x running.
       LOG.info("RATIO: " + ratio);
-      assertTrue("Throttle is too restrictive", ratio <= 10f);
-      assertTrue("Throttle is too permissive" + ratio, ratio >= 7f);
+      assertTrue(ratio <= 10f, "Throttle is too restrictive");
+      assertTrue(ratio >= 7f, "Throttle is too permissive" + ratio);
 
       // Test with a different limit
       conf.setInt(
@@ -736,8 +732,8 @@
 
       // Waiting should be about 4x running.
       LOG.info("RATIO: " + ratio);
-      assertTrue("Throttle is too restrictive", ratio <= 4.5f);
-      assertTrue("Throttle is too permissive", ratio >= 2.75f);
+      assertTrue(ratio <= 4.5f, "Throttle is too restrictive");
+      assertTrue(ratio >= 2.75f, "Throttle is too permissive");
 
       // Test with more than 1 thread
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 3);
@@ -755,8 +751,8 @@
 
       // Waiting should be about 9x running.
       LOG.info("RATIO: " + ratio);
-      assertTrue("Throttle is too restrictive", ratio <= 10f);
-      assertTrue("Throttle is too permissive", ratio >= 7f);
+      assertTrue(ratio <= 10f, "Throttle is too restrictive");
+      assertTrue(ratio >= 7f, "Throttle is too permissive");
 
       // Test with no limit
       scanner = new DirectoryScanner(fds, getConfiguration());
@@ -765,10 +761,10 @@
       scanner.shutdown();
       assertFalse(scanner.getRunStatus());
 
-      assertTrue("Throttle appears to be engaged",
-          scanner.timeWaitingMs.get() < 10L);
-      assertTrue("Report complier threads logged no execution time",
-          scanner.timeRunningMs.get() > 0L);
+      assertTrue(scanner.timeWaitingMs.get() < 10L,
+          "Throttle appears to be engaged");
+      assertTrue(scanner.timeRunningMs.get() > 0L,
+          "Report complier threads logged no execution time");
 
       // Test with a 1ms limit. This also tests whether the scanner can be
       // shutdown cleanly in mid stride.
@@ -807,8 +803,8 @@
           if (finalMs > 0) {
             LOG.info("Scanner took " + (Time.monotonicNow() - finalMs)
                 + "ms to shutdown");
-            assertTrue("Scanner took too long to shutdown",
-                Time.monotonicNow() - finalMs < 1000L);
+            assertTrue(Time.monotonicNow() - finalMs < 1000L,
+                "Scanner took too long to shutdown");
           }
 
           ratio =
@@ -821,9 +817,9 @@
 
       // We just want to test that it waits a lot, but it also runs some
       LOG.info("RATIO: " + ratio);
-      assertTrue("Throttle is too permissive", ratio > 8);
-      assertTrue("Report complier threads logged no execution time",
-          scanner.timeRunningMs.get() > 0L);
+      assertTrue(ratio > 8, "Throttle is too permissive");
+      assertTrue(scanner.timeRunningMs.get() > 0L,
+          "Report complier threads logged no execution time");
 
       // Test with a 0 limit, i.e. disabled
       conf.setInt(
@@ -835,10 +831,10 @@
       scanner.shutdown();
       assertFalse(scanner.getRunStatus());
 
-      assertTrue("Throttle appears to be engaged",
-          scanner.timeWaitingMs.get() < 10L);
-      assertTrue("Report complier threads logged no execution time",
-          scanner.timeRunningMs.get() > 0L);
+      assertTrue(scanner.timeWaitingMs.get() < 10L,
+          "Throttle appears to be engaged");
+      assertTrue(scanner.timeRunningMs.get() > 0L,
+          "Report complier threads logged no execution time");
 
       // Test with a 1000 limit, i.e. disabled
       conf.setInt(
@@ -850,10 +846,10 @@
       scanner.shutdown();
       assertFalse(scanner.getRunStatus());
 
-      assertTrue("Throttle appears to be engaged",
-          scanner.timeWaitingMs.get() < 10L);
-      assertTrue("Report complier threads logged no execution time",
-          scanner.timeRunningMs.get() > 0L);
+      assertTrue(scanner.timeWaitingMs.get() < 10L,
+          "Throttle appears to be engaged");
+      assertTrue(scanner.timeRunningMs.get() > 0L,
+          "Report complier threads logged no execution time");
 
       // Test that throttle works from regular start
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
@@ -874,7 +870,7 @@
 
       scanner.shutdown();
       assertFalse(scanner.getRunStatus());
-      assertTrue("Throttle does not appear to be engaged", count > 0);
+      assertTrue(count > 0, "Throttle does not appear to be engaged");
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 53be71f..65963fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -53,9 +53,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 /**
@@ -67,7 +67,7 @@
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
@@ -79,7 +79,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -111,9 +111,9 @@
     storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
     File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
     try {
-      // make the data directory of the first datanode to be readonly
-      assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
-      assertTrue("Couldn't chmod local vol", dir2.setReadOnly());
+      // make the data directories of the first datanode read-only
+      assertTrue(dir1.setReadOnly(), "Couldn't chmod local vol");
+      assertTrue(dir2.setReadOnly(), "Couldn't chmod local vol");
 
       // create files and make sure that first datanode will be down
       DataNode dn = cluster.getDataNodes().get(dnIndex);
@@ -145,7 +145,7 @@
     // get the block belonged to the created file
     LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
-    assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
+    assertEquals(blocks.locatedBlockCount(), 1, "Should only find 1 block");
     LocatedBlock block = blocks.get(0);
 
     // bring up a second datanode
@@ -207,8 +207,8 @@
         for (FsVolumeSpi vol : volumes) {
           Path dataDir = new Path(vol.getStorageLocation().getNormalizedUri());
           FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
-          assertEquals("Permission for dir: " + dataDir + ", is " + actual +
-              ", while expected is " + expected, expected, actual);
+          assertEquals(expected, actual, "Permission for dir: " + dataDir +
+              ", is " + actual + ", while expected is " + expected);
         }
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
index 9d24ccb..1f79f10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
@@ -32,15 +32,16 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY;
+import static org.hamcrest.MatcherAssert.assertThat;
+
 import org.apache.hadoop.test.GenericTestUtils;
 
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.times;
@@ -73,7 +74,7 @@
     bpid = cluster.getNamesystem().getBlockPoolId();
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (cluster != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java
index fd72804..bbd6ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.nio.ByteBuffer;
@@ -45,9 +45,9 @@
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.util.NativeCodeLoader;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,7 +64,7 @@
 
   private static final int BLOCK_SIZE = 4096;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
     NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
@@ -72,7 +72,7 @@
     sockDir = new TemporarySocketDirectory();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     // Restore the original CacheManipulator
     NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
index 10f371b..46dd31c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
@@ -39,7 +39,7 @@
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Writer;
 import org.apache.hadoop.io.compress.DefaultCodec;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHSync {
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
index 0d359d8..eddcf1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
index e848cbf..6cc08f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.atLeastOnce;
@@ -40,8 +40,8 @@
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 /**
@@ -65,7 +65,7 @@
   private BPServiceActor actor;   // BPSA to use for block injection.
   private String storageUuid;     // DatanodeStorage to use for block injection.
 
-  @Before
+  @BeforeEach
   public void startCluster() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
index c556d0d..9c88444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
@@ -20,8 +20,10 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -46,9 +48,9 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This test verifies that incremental block reports from a single DataNode are
@@ -89,7 +91,7 @@
     GenericTestUtils.setLogLevel(TestIncrementalBrVariations.LOG, Level.TRACE);
   }
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
@@ -101,7 +103,7 @@
     dn0Reg = dn0.getDNRegistrationForBP(poolId);
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (cluster != null) {
       client.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java
index 1ea52a0..eb29231 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestLargeBlockReport.java
@@ -19,7 +19,7 @@
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -37,9 +37,9 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -59,13 +59,13 @@
   private final long reportId = 1;
   private final long fullBrLeaseId = 0;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     DFSTestUtil.setNameNodeLogLevel(Level.WARN);
     FsDatasetImplTestUtils.setFsDatasetImplLogLevel(Level.WARN);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
index a7e8b1e..1ba6824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestProvidedReplicaImpl.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -35,8 +35,8 @@
 import org.apache.commons.io.input.BoundedInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,7 +90,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     createFileIfNotExists(new File(BASE_DIR).getAbsolutePath());
     createProvidedReplicas(new Configuration());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
index 106c515..f881456 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.hamcrest.CoreMatchers.*;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State.*;
 
 import java.io.IOException;
@@ -47,10 +47,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables;
 
 /**
@@ -92,7 +91,7 @@
    * Setup a {@link MiniDFSCluster}.
    * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
    */
-  @Before
+  @BeforeEach
   public void setup() throws IOException, InterruptedException {
     conf = new HdfsConfiguration();
     SimulatedFSDataset.setFactory(conf);
@@ -152,7 +151,7 @@
     waitForLocations(2);
   }
   
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     fs.delete(PATH, false);
     
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
index 60d4cca..8bc55a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index dde9ad5..b164e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -39,8 +36,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.DataChecksum;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * this class tests the methods of the  SimulatedFSDataset.
@@ -62,7 +59,7 @@
     this.storageCount = storageCount;
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     SimulatedFSDataset.setFactory(conf);
@@ -141,7 +138,7 @@
     ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
     try {
       assertTrue(fsdataset.getMetaDataInputStream(b) == null);
-      assertTrue("Expected an IO exception", false);
+      assertTrue(false, "Expected an IO exception");
     } catch (IOException e) {
       // ok - as expected
     }
@@ -250,7 +247,7 @@
       sfsdataset = getSimulatedFSDataset();
       sfsdataset.addBlockPool(bpid, conf);
       injectBlocksFromBlockReport(fsdataset, sfsdataset);
-      assertTrue("Expected an IO exception", false);
+      assertTrue(false, "Expected an IO exception");
     } catch (IOException e) {
       // ok - as expected
     }
@@ -261,21 +258,21 @@
     assertFalse(fsdataset.isValidBlock(b));
     try {
       fsdataset.getLength(b);
-      assertTrue("Expected an IO exception", false);
+      assertTrue(false, "Expected an IO exception");
     } catch (IOException e) {
       // ok - as expected
     }
     
     try {
       fsdataset.getBlockInputStream(b);
-      assertTrue("Expected an IO exception", false);
+      assertTrue(false, "Expected an IO exception");
     } catch (IOException e) {
       // ok - as expected
     }
     
     try {
       fsdataset.finalizeBlock(b, false);
-      assertTrue("Expected an IO exception", false);
+      assertTrue(false, "Expected an IO exception");
     } catch (IOException e) {
       // ok - as expected
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDatasetWithMultipleStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDatasetWithMultipleStorages.java
index b31ae98..9cf182c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDatasetWithMultipleStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDatasetWithMultipleStorages.java
@@ -17,11 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 
 /**
@@ -35,7 +35,7 @@
     super(2);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
     conf.set(DFS_DATANODE_DATA_DIR_KEY, "data1,data2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java
index 51a843b..62750c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -27,9 +27,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.Assume;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import java.net.BindException;
 import java.net.InetSocketAddress;
@@ -59,7 +59,7 @@
 
   private void testExternalKdcRunning() {
     // Tests are skipped if external KDC is not running.
-    Assume.assumeTrue(isExternalKdcRunning());
+    Assumptions.assumeTrue(isExternalKdcRunning());
   }
 
   @Test
@@ -72,15 +72,15 @@
       String nnSpnegoPrincipal =
         System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
       String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
-      assertNotNull("NameNode principal was not specified", nnPrincipal);
-      assertNotNull("NameNode SPNEGO principal was not specified",
-                    nnSpnegoPrincipal);
-      assertNotNull("NameNode keytab was not specified", nnKeyTab);
+      assertNotNull(nnPrincipal, "NameNode principal was not specified");
+      assertNotNull(nnSpnegoPrincipal,
+          "NameNode SPNEGO principal was not specified");
+      assertNotNull(nnKeyTab, "NameNode keytab was not specified");
 
       String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal");
       String dnKeyTab = System.getProperty("dfs.datanode.keytab.file");
-      assertNotNull("DataNode principal was not specified", dnPrincipal);
-      assertNotNull("DataNode keytab was not specified", dnKeyTab);
+      assertNotNull(dnPrincipal, "DataNode principal was not specified");
+      assertNotNull(dnKeyTab, "DataNode keytab was not specified");
 
       Configuration conf = new HdfsConfiguration();
       conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
index 9df6209..508bc15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
@@ -34,15 +34,15 @@
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -59,7 +59,7 @@
   private DistributedFileSystem fs;
   static String bpid;
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -70,7 +70,7 @@
     bpid = cluster.getNamesystem().getBlockPoolId();
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (cluster != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
index 8653f4b..02f03a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
@@ -38,8 +38,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_WRITE_BANDWIDTHPERSEC_KEY;
@@ -68,9 +68,9 @@
       LOG.info("wait since replicas.size() == 0; i=" + i);
       Thread.sleep(1000);
     }
-    Assert.assertEquals(1, replicas.size());
+    Assertions.assertEquals(1, replicas.size());
     final ReplicaInfo r = replicas.iterator().next();
-    Assert.assertEquals(expectedState, r.getState());
+    Assertions.assertEquals(expectedState, r.getState());
     return (LocalReplicaInPipeline)r;
   }
 
@@ -106,7 +106,7 @@
         final DataNode oldnode = cluster.getDataNodes().get(0);
         // DataXceiverServer#writeThrottler is null if
         // dfs.datanode.data.write.bandwidthPerSec default value is 0.
-        Assert.assertNull(oldnode.xserver.getWriteThrottler());
+        Assertions.assertNull(oldnode.xserver.getWriteThrottler());
         oldrbw = getRbw(oldnode, bpid);
         LOG.info("oldrbw = " + oldrbw);
         
@@ -118,17 +118,17 @@
         // DataXceiverServer#writeThrottler#balancer is equal to
         // dfs.datanode.data.write.bandwidthPerSec value if
         // dfs.datanode.data.write.bandwidthPerSec value is not zero.
-        Assert.assertEquals(1024 * 1024 * 8,
+        Assertions.assertEquals(1024 * 1024 * 8,
             newnode.xserver.getWriteThrottler().getBandwidth());
         final DatanodeInfo oldnodeinfo;
         {
           final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc(
               ).getDatanodeReport(DatanodeReportType.LIVE);
-          Assert.assertEquals(2, datatnodeinfos.length);
+          Assertions.assertEquals(2, datatnodeinfos.length);
           int i = 0;
           for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
               i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++);
-          Assert.assertTrue(i < datatnodeinfos.length);
+          Assertions.assertTrue(i < datatnodeinfos.length);
           newnodeinfo = datatnodeinfos[i];
           oldnodeinfo = datatnodeinfos[1 - i];
         }
@@ -138,15 +138,15 @@
             oldrbw.getGenerationStamp());
         final BlockOpResponseProto s = DFSTestUtil.transferRbw(
             b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
-        Assert.assertEquals(Status.SUCCESS, s.getStatus());
+        Assertions.assertEquals(Status.SUCCESS, s.getStatus());
       }
 
       //check new rbw
       final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
       LOG.info("newrbw = " + newrbw);
-      Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
-      Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
-      Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());
+      Assertions.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
+      Assertions.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
+      Assertions.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());
 
       LOG.info("DONE");
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
index 14af74d..c2ecaf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.net.InetSocketAddress;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
index fd05a47..52045d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
@@ -52,7 +52,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
 import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.*;
 
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
index 7eb79b7..a980cf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerFailures.java
@@ -24,8 +24,8 @@
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
 import org.apache.hadoop.util.FakeTimer;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,8 +34,8 @@
 import java.util.concurrent.TimeUnit;
 import java.util.*;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -55,7 +55,7 @@
 
   private static final long MIN_DISK_CHECK_GAP_MS = 1000; // 1 second.
 
-  @Before
+  @BeforeEach
   public void commonInit() {
     timer = new FakeTimer();
     conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
index fa04474..3d6c441 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeCheckerTimeout.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.util.FakeTimer;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
index 80f0396..0e2a2e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
@@ -42,7 +42,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
 import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.mockito.Mockito.*;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
index 318f8b2..87ffe9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncChecker.java
@@ -32,9 +32,7 @@
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.atomic.AtomicLong;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Verify functionality of {@link ThrottledAsyncChecker}.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
index dac5550..97dd83e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
@@ -22,9 +22,9 @@
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;
 import org.apache.hadoop.util.FakeTimer;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.TestName;
 import org.junit.rules.Timeout;
 import org.slf4j.LoggerFactory;
@@ -38,8 +38,8 @@
 import java.util.concurrent.locks.ReentrantLock;
 
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
@@ -61,7 +61,7 @@
     return new ScheduledThreadPoolExecutor(1);
   }
 
-  @Before
+  @BeforeEach
   public void initializeLock() {
     lock = new ReentrantLock();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java
index e439152..afc8226 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests the ability to create external FsDatasetSpi implementations.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
index 24a43e7..67b759c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
@@ -28,8 +28,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 
 public class TestAvailableSpaceVolumeChoosingPolicy {
@@ -89,11 +89,11 @@
     // than the threshold of 1MB.
     volumes.add(Mockito.mock(FsVolumeSpi.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
   }
   
@@ -121,24 +121,24 @@
     // We should alternate assigning between the two volumes with a lot of free
     // space.
     initPolicy(policy, 1.0f);
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
         null));
 
     // All writes should be assigned to the volume with the least free space.
     initPolicy(policy, 0.0f);
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
         null));
   }
   
@@ -170,25 +170,25 @@
     // We should alternate assigning between the two volumes with a lot of free
     // space.
     initPolicy(policy, 1.0f);
-    Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100,
         null));
 
     // We should alternate assigning between the two volumes with less free
     // space.
     initPolicy(policy, 0.0f);
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100,
          null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100,
         null));
   }
   
@@ -214,7 +214,7 @@
     // space to accept the replica size, and another volume does have enough
     // free space, that should be chosen instead.
     initPolicy(policy, 0.0f);
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes,
         1024L * 1024L * 2, null));
   }
   
@@ -242,7 +242,7 @@
 
     // Should still be able to get a volume for the replica even though the
     // available space on the second volume changed.
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes,
         100, null));
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
index fc99d3c..0b21b5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java
@@ -27,8 +27,8 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 public class TestRoundRobinVolumeChoosingPolicy {
@@ -55,20 +55,20 @@
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
 
     // Test two rounds of round-robin choosing
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null));
-    Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null));
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null));
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null));
+    Assertions.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0, null));
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0, null));
 
     // The first volume has only 100L space, so the policy should
     // wisely choose the second one in case we ask for more.
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150,
         null));
 
     // Fail if no volume can be chosen?
     try {
       policy.chooseVolume(volumes, Long.MAX_VALUE, null);
-      Assert.fail();
+      Assertions.fail();
     } catch (IOException e) {
       // Passed.
     }
@@ -103,15 +103,15 @@
 
     // The first volume has only 100L space, so the policy should choose
     // the second one with additional available space configured as 100L.
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0,
         null));
-    Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0,
+    Assertions.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0,
         null));
 
     // Fail if no volume can be chosen?
     try {
       policy.chooseVolume(volumes, 100, null);
-      Assert.fail();
+      Assertions.fail();
     } catch (IOException e) {
       // Passed.
     }
@@ -141,12 +141,12 @@
     int blockSize = 700;
     try {
       policy.chooseVolume(volumes, blockSize, null);
-      Assert.fail("expected to throw DiskOutOfSpaceException");
+      Assertions.fail("expected to throw DiskOutOfSpaceException");
     } catch(DiskOutOfSpaceException e) {
-      Assert.assertEquals("Not returnig the expected message",
-          "Out of space: The volume with the most available space (=" + 600
-              + " B) is less than the block size (=" + blockSize + " B).",
-          e.getMessage());
+      Assertions.assertEquals(
+          "Out of space: The volume with the most available space (=" + 600
+              + " B) is less than the block size (=" + blockSize + " B).",
+          e.getMessage(), "Not returning the expected message");
     }
   }
 
@@ -183,23 +183,23 @@
             .thenReturn(StorageType.SSD);
     Mockito.when(ssdVolumes.get(1).getAvailable()).thenReturn(100L);
 
-    Assert.assertEquals(diskVolumes.get(0),
+    Assertions.assertEquals(diskVolumes.get(0),
             policy.chooseVolume(diskVolumes, 0, null));
     // Independent Round-Robin for different storage type
-    Assert.assertEquals(ssdVolumes.get(0),
+    Assertions.assertEquals(ssdVolumes.get(0),
             policy.chooseVolume(ssdVolumes, 0, null));
     // Take block size into consideration
-    Assert.assertEquals(ssdVolumes.get(0),
+    Assertions.assertEquals(ssdVolumes.get(0),
             policy.chooseVolume(ssdVolumes, 150L, null));
 
-    Assert.assertEquals(diskVolumes.get(1),
+    Assertions.assertEquals(diskVolumes.get(1),
             policy.chooseVolume(diskVolumes, 0, null));
-    Assert.assertEquals(diskVolumes.get(0),
+    Assertions.assertEquals(diskVolumes.get(0),
             policy.chooseVolume(diskVolumes, 50L, null));
 
     try {
       policy.chooseVolume(diskVolumes, 200L, null);
-      Assert.fail("Should throw an DiskOutOfSpaceException before this!");
+      Assertions.fail("Should throw an DiskOutOfSpaceException before this!");
     } catch (DiskOutOfSpaceException e) {
       // Pass.
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index 9095594..a6932a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -35,8 +35,8 @@
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class FsDatasetTestUtil {
 
@@ -104,9 +104,9 @@
     try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
         FileChannel channel = raf.getChannel()) {
       FileLock lock = channel.tryLock();
-      assertNotNull(String.format(
-          "Lock file at %s appears to be held by a different process.",
-          lockFile.getAbsolutePath()), lock);
+      assertNotNull(lock, String.format(
+          "Lock file at %s appears to be held by a different process.",
+          lockFile.getAbsolutePath()));
       if (lock != null) {
         try {
           lock.release();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 9774fea1..6d3b416 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -30,10 +30,10 @@
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.util.Shell.getMemlockLimit;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -80,8 +80,8 @@
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
 import org.junit.Rule;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.rules.Timeout;
 import org.slf4j.event.Level;
 
@@ -129,7 +129,7 @@
   protected JMXGet jmx;
   protected TemporarySocketDirectory sockDir;
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws Exception {
 
     // Dump all RamDisk JMX metrics before shutdown the cluster
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java
index 79529ea..1186a17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestAddBlockPoolException.java
@@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests to ensure AddBlockPoolException behaves correctly when additional
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java
index 9575028..60961f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestCacheByPmemMappableBlockLoader.java
@@ -23,10 +23,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PMEM_CACHE_DIRS_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -54,12 +51,12 @@
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.util.function.Supplier;
@@ -102,7 +99,7 @@
         LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpClass() throws Exception {
     oldInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector.set(new DataNodeFaultInjector() {
@@ -118,12 +115,12 @@
     });
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownClass() throws Exception {
     DataNodeFaultInjector.set(oldInjector);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setLong(
@@ -152,7 +149,7 @@
     cacheManager = ((FsDatasetImpl) dn.getFSDataset()).cacheManager;
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -211,7 +208,7 @@
     final int maxCacheBlocksNum =
         Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
     BlockReaderTestUtil.enableHdfsCachingTracing();
-    Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
+    Assertions.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
     assertEquals(CACHE_CAPACITY, cacheManager.getCacheCapacity());
     // DRAM cache is expected to be disabled.
     assertEquals(0L, cacheManager.getMemCacheCapacity());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index 0afee5f..ece11b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -46,8 +46,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /** Test if a datanode can correctly upgrade itself */
 public class TestDatanodeRestart {
@@ -127,13 +127,13 @@
       // check volumeMap: one rwr replica
       String bpid = cluster.getNamesystem().getBlockPoolId();
       ReplicaMap replicas = dataset(dn).volumeMap;
-      Assert.assertEquals(1, replicas.size(bpid));
+      Assertions.assertEquals(1, replicas.size(bpid));
       ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
-      Assert.assertEquals(ReplicaState.RWR, replica.getState());
+      Assertions.assertEquals(ReplicaState.RWR, replica.getState());
       if (isCorrupt) {
-        Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
+        Assertions.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
       } else {
-        Assert.assertEquals(fileLen, replica.getNumBytes());
+        Assertions.assertEquals(fileLen, replica.getNumBytes());
       }
       dataset(dn).invalidate(bpid, new Block[]{replica});
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
index 8b1a6c0..fa815fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
@@ -20,8 +20,8 @@
 import net.jcip.annotations.NotThreadSafe;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyInt;
@@ -83,12 +83,12 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.util.function.Supplier;
@@ -131,7 +131,7 @@
         LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpClass() throws Exception {
     oldInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector.set(new DataNodeFaultInjector() {
@@ -146,12 +146,12 @@
     });
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownClass() throws Exception {
     DataNodeFaultInjector.set(oldInjector);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setLong(
@@ -179,7 +179,7 @@
     spyNN = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     // Verify that each test uncached whatever it cached.  This cleanup is
     // required so that file descriptors are not leaked across tests.
@@ -286,15 +286,15 @@
     // Get the details of the written file
     HdfsBlockLocation[] locs =
         (HdfsBlockLocation[])fs.getFileBlockLocations(testFile, 0, testFileLen);
-    assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length);
+    assertEquals(NUM_BLOCKS, locs.length, "Unexpected number of blocks");
     final long[] blockSizes = getBlockSizes(locs);
 
     // Check initial state
     final long cacheCapacity = fsd.getCacheCapacity();
     long cacheUsed = fsd.getCacheUsed();
     long current = 0;
-    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
-    assertEquals("Unexpected amount of cache used", current, cacheUsed);
+    assertEquals(CACHE_CAPACITY, cacheCapacity, "Unexpected cache capacity");
+    assertEquals(current, cacheUsed, "Unexpected amount of cache used");
 
     MetricsRecordBuilder dnMetrics;
     long numCacheCommands = 0;
@@ -307,9 +307,9 @@
           current + blockSizes[i], i + 1, fsd);
       dnMetrics = getMetrics(dn.getMetrics().name());
       long cmds = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
-      assertTrue("Expected more cache requests from the NN ("
-          + cmds + " <= " + numCacheCommands + ")",
-           cmds > numCacheCommands);
+      assertTrue(cmds > numCacheCommands,
+          "Expected more cache requests from the NN ("
+          + cmds + " <= " + numCacheCommands + ")");
       numCacheCommands = cmds;
     }
 
@@ -321,8 +321,8 @@
               NUM_BLOCKS - 1 - i, fsd);
       dnMetrics = getMetrics(dn.getMetrics().name());
       long cmds = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
-      assertTrue("Expected more uncache requests from the NN",
-           cmds > numUncacheCommands);
+      assertTrue(cmds > numUncacheCommands,
+          "Expected more uncache requests from the NN");
       numUncacheCommands = cmds;
     }
     LOG.info("finishing testCacheAndUncacheBlock");
@@ -408,9 +408,9 @@
         return lines > 0;
       }
     }, 500, 30000);
-    // Also check the metrics for the failure
-    assertTrue("Expected more than 0 failed cache attempts",
-        fsd.getNumBlocksFailedToCache() > 0);
+    // Also check the metrics for the failure
+    assertTrue(fsd.getNumBlocksFailedToCache() > 0,
+        "Expected more than 0 failed cache attempts");
 
     // Uncache the n-1 files
     int curCachedBlocks = 16;
@@ -439,15 +439,15 @@
     // Get the details of the written file
     HdfsBlockLocation[] locs =
         (HdfsBlockLocation[])fs.getFileBlockLocations(testFile, 0, testFileLen);
-    assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length);
+    assertEquals(NUM_BLOCKS, locs.length, "Unexpected number of blocks");
     final long[] blockSizes = getBlockSizes(locs);
 
     // Check initial state
     final long cacheCapacity = fsd.getCacheCapacity();
     long cacheUsed = fsd.getCacheUsed();
     long current = 0;
-    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
-    assertEquals("Unexpected amount of cache used", current, cacheUsed);
+    assertEquals(CACHE_CAPACITY, cacheCapacity, "Unexpected cache capacity");
+    assertEquals(current, cacheUsed, "Unexpected amount of cache used");
 
     NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
       @Override
@@ -457,7 +457,7 @@
         try {
           Thread.sleep(3000);
         } catch (InterruptedException e) {
-          Assert.fail();
+          Assertions.fail();
         }
       }
     });
@@ -503,8 +503,8 @@
     // Write a small file
     Path fileName = new Path("/testPageRounder");
     final int smallBlocks = 512; // This should be smaller than the page size
-    assertTrue("Page size should be greater than smallBlocks!",
-        PAGE_SIZE > smallBlocks);
+    assertTrue(PAGE_SIZE > smallBlocks,
+        "Page size should be greater than smallBlocks!");
     final int numBlocks = 5;
     final int fileLen = smallBlocks * numBlocks;
     FSDataOutputStream out =
@@ -564,7 +564,7 @@
     final int TOTAL_BLOCKS_PER_CACHE =
         Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
     BlockReaderTestUtil.enableHdfsCachingTracing();
-    Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
+    Assertions.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
     
     // Create a small file
     final Path SMALL_FILE = new Path("/smallFile");
@@ -602,7 +602,7 @@
         .setPool("pool").setPath(SMALL_FILE).setReplication((short)1).build());
     Thread.sleep(10000);
     MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
-    Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE,
+    Assertions.assertEquals(TOTAL_BLOCKS_PER_CACHE,
         MetricsAsserts.getLongCounter("BlocksCached", dnMetrics));
     
     // Uncache the big file and verify that the small file can now be
@@ -626,7 +626,7 @@
           }
           LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
         } catch (IOException e) {
-          Assert.fail("unexpected exception" + e.toString());
+          Assertions.fail("unexpected exception" + e.toString());
         }
         return true;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 9b659d9..88ed8d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -76,9 +76,9 @@
 import org.apache.hadoop.util.FakeTimer;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import java.io.File;
@@ -97,17 +97,10 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -201,7 +194,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     datanode = mock(DataNode.class);
     storage = mock(DataStorage.class);
@@ -605,10 +598,10 @@
     volReferences.close();
     dataset.removeVolumes(volumesToRemove, true);
     int expectedNumVolumes = dataDirs.length - 1;
-    assertEquals("The volume has been removed from the volumeList.",
-        expectedNumVolumes, getNumVolumes());
-    assertEquals("The volume has been removed from the storageMap.",
-        expectedNumVolumes, dataset.storageMap.size());
+    assertEquals(expectedNumVolumes, getNumVolumes(),
+        "The volume has been removed from the volumeList.");
+    assertEquals(expectedNumVolumes, dataset.storageMap.size(),
+        "The volume has been removed from the storageMap.");
 
     // DataNode.notifyNamenodeDeletedBlock() should be called 50 times
     // as we deleted one volume that has 50 blocks
@@ -631,9 +624,9 @@
     for (String bpid : dataset.volumeMap.getBlockPoolList()) {
       totalNumReplicas += dataset.volumeMap.size(bpid);
     }
-    assertEquals("The replica infos on this volume has been removed from the "
-                 + "volumeMap.", numBlocks / NUM_INIT_VOLUMES,
-                 totalNumReplicas);
+    assertEquals(numBlocks / NUM_INIT_VOLUMES, totalNumReplicas,
+        "The replica infos on this volume has been removed from the "
+        + "volumeMap.");
   }
 
   @Test(timeout = 30000)
@@ -675,10 +668,10 @@
 
     dataset.removeVolumes(volumesToRemove, true);
     int expectedNumVolumes = dataDirs.length - 2;
-    assertEquals("The volume has been removed from the volumeList.",
-        expectedNumVolumes, getNumVolumes());
-    assertEquals("The volume has been removed from the storageMap.",
-        expectedNumVolumes, dataset.storageMap.size());
+    assertEquals(expectedNumVolumes, getNumVolumes(),
+        "The volume has been removed from the volumeList.");
+    assertEquals(expectedNumVolumes, dataset.storageMap.size(),
+        "The volume has been removed from the storageMap.");
 
     // DataNode.notifyNamenodeDeletedBlock() should be called 100 times
     // as we deleted 2 volumes that have 100 blocks totally
@@ -703,8 +696,8 @@
     for (String bpid : dataset.volumeMap.getBlockPoolList()) {
       totalNumReplicas += dataset.volumeMap.size(bpid);
     }
-    assertEquals("The replica infos on this volume has been removed from the "
-        + "volumeMap.", 0, totalNumReplicas);
+    assertEquals(0, totalNumReplicas,
+        "The replica infos on this volume has been removed from the volumeMap.");
   }
 
   @Test(timeout = 5000)
@@ -1000,7 +993,7 @@
           volumesToRemove.add(dataset.getVolume(eb).getStorageLocation());
         } catch (Exception e) {
           LOG.info("Problem preparing volumes to remove: ", e);
-          Assert.fail("Exception in remove volume thread, check log for " +
+          Assertions.fail("Exception in remove volume thread, check log for " +
               "details.");
         }
         LOG.info("Removing volume " + volumesToRemove);
@@ -1082,8 +1075,8 @@
         finalizedDir.setExecutable(false);
         assertTrue(FileUtil.setWritable(finalizedDir, false));
       }
-      Assert.assertTrue("Reference count for the volume should be greater "
-          + "than 0", volume.getReferenceCount() > 0);
+      Assertions.assertTrue(volume.getReferenceCount() > 0,
+          "Reference count for the volume should be greater than 0");
       // Invoke the synchronous checkDiskError method
       dataNode.checkDiskError();
       // Sleep for 1 second so that datanode can interrupt and cluster clean up
@@ -1096,7 +1089,7 @@
 
       try {
         out.close();
-        Assert.fail("This is not a valid code path. "
+        Assertions.fail("This is not a valid code path. "
             + "out.close should have thrown an exception.");
       } catch (IOException ioe) {
         GenericTestUtils.assertExceptionContains(info.getXferAddr(), ioe);
@@ -1117,7 +1110,7 @@
       cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
       cluster.waitActive();
 
-      Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
+      Assertions.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
       DataNode dataNode = cluster.getDataNodes().get(0);
       ExtendedBlock block =
           new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0);
@@ -1128,8 +1121,8 @@
         threwException = true;
       }
       Thread.sleep(3000);
-      Assert.assertFalse(threwException);
-      Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
+      Assertions.assertFalse(threwException);
+      Assertions.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
 
       FileSystem fs = cluster.getFileSystem();
       Path filePath = new Path("testData");
@@ -1143,7 +1136,7 @@
       BlockManagerTestUtil.updateState(cluster.getNamesystem()
           .getBlockManager());
       // Verify the bad block has been reported to namenode
-      Assert.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
+      Assertions.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
     } finally {
       cluster.shutdown();
     }
@@ -1605,7 +1598,7 @@
       FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset();
       ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
       FsVolumeSpi destVolume = getDestinationVolume(block, fsDataSetImpl);
-      assertNotNull("Destination volume should not be null.", destVolume);
+      assertNotNull(destVolume, "Destination volume should not be null.");
       fsDataSetImpl.moveBlock(block, replicaInfo,
           destVolume.obtainReference(), false);
       // Trigger block report to update block info in NN
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 15495df..e4c7755 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -38,8 +38,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import java.io.File;
@@ -55,11 +55,7 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -74,7 +70,7 @@
   private String baseDir;
   private BlockScanner blockScanner;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     dataset = mock(FsDatasetImpl.class);
     baseDir = new FileSystemTestHelper().getTestRootDir();
@@ -163,7 +159,7 @@
         .setStorageID("storage-id")
         .setConf(conf)
         .build();
-    assertEquals("", 100L, volume.getReserved());
+    assertEquals(100L, volume.getReserved());
     // when storage type reserved is configured.
     conf.setLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
@@ -181,7 +177,7 @@
         .setStorageID("storage-id")
         .setConf(conf)
         .build();
-    assertEquals("", 1L, volume1.getReserved());
+    assertEquals(1L, volume1.getReserved());
     FsVolumeImpl volume2 = new FsVolumeImplBuilder().setDataset(dataset)
         .setStorageDirectory(
             new StorageDirectory(
@@ -189,7 +185,7 @@
         .setStorageID("storage-id")
         .setConf(conf)
         .build();
-    assertEquals("", 2L, volume2.getReserved());
+    assertEquals(2L, volume2.getReserved());
     FsVolumeImpl volume3 = new FsVolumeImplBuilder().setDataset(dataset)
         .setStorageDirectory(
             new StorageDirectory(
@@ -197,7 +193,7 @@
         .setStorageID("storage-id")
         .setConf(conf)
         .build();
-    assertEquals("", 100L, volume3.getReserved());
+    assertEquals(100L, volume3.getReserved());
     FsVolumeImpl volume4 = new FsVolumeImplBuilder().setDataset(dataset)
         .setStorageDirectory(
             new StorageDirectory(
@@ -205,7 +201,7 @@
         .setStorageID("storage-id")
         .setConf(conf)
         .build();
-    assertEquals("", 100L, volume4.getReserved());
+    assertEquals(100L, volume4.getReserved());
     FsVolumeImpl volume5 = new FsVolumeImplBuilder().setDataset(dataset)
         .setStorageDirectory(
             new StorageDirectory(
@@ -405,10 +401,10 @@
     // It will create BlockPoolSlice.AddReplicaProcessor task's and lunch in
     // ForkJoinPool recursively
     vol.getVolumeMap(bpid, volumeMap, ramDiskReplicaMap);
-    assertTrue("Failed to add all the replica to map", volumeMap.replicas(bpid)
-        .size() == 1000);
-    assertEquals("Fork pool should be initialize with configured pool size",
-        poolSize, BlockPoolSlice.getAddReplicaForkPoolSize());
+    assertTrue(volumeMap.replicas(bpid).size() == 1000,
+        "Failed to add all the replica to map");
+    assertEquals(poolSize, BlockPoolSlice.getAddReplicaForkPoolSize(),
+        "Fork pool should be initialized with configured pool size");
   }
 
   @Test(timeout = 60000)
@@ -427,9 +423,9 @@
           cluster.getNamesystem(0).getBlockPoolId()).getAddReplicaThreadPool();
       ForkJoinPool threadPool2 = vol.getBlockPoolSlice(
           cluster.getNamesystem(1).getBlockPoolId()).getAddReplicaThreadPool();
-      assertEquals(
-          "Thread pool instance should be same in all the BlockPoolSlice",
-          threadPool1, threadPool2);
+      assertEquals(
+          threadPool1, threadPool2,
+          "Thread pool instance should be same in all the BlockPoolSlice");
     }
   }
 
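Note: TestFsVolumeList above still imports org.junit.Test, so the JUnit 4 timeout attribute on @Test(timeout = 60000) keeps compiling; the Jupiter @Test annotation has no timeout attribute. A minimal sketch of the Jupiter-native equivalent, not part of this patch and using a hypothetical test method name:

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    @Test
    @Timeout(60)  // seconds; replaces the JUnit 4 @Test(timeout = 60000) attribute
    public void testWithTimeout() throws Exception {
      // test body unchanged
    }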
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index b72b1cd..0ed7f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -60,10 +59,8 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.Assert;
 import org.junit.Test;
-
-import static org.junit.Assume.assumeTrue;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * This tests InterDataNodeProtocol for block handling. 
@@ -116,8 +113,8 @@
   public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
     Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
         b.getBlockPoolId(), b.getBlockId());
-    Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
-    Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
+    Assertions.assertEquals(b.getBlockId(), metainfo.getBlockId());
+    Assertions.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
   }
 
   public static LocatedBlock getLastLocatedBlock(
@@ -222,10 +219,10 @@
   }
 
   private static void assertEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo recoveryInfo) {
-    Assert.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId());
-    Assert.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp());
-    Assert.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes());
-    Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
+    Assertions.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId());
+    Assertions.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp());
+    Assertions.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes());
+    Assertions.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
   }
 
   /** Test 
@@ -256,8 +253,8 @@
       assertEquals(originalInfo, recoveryInfo);
 
       final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b);
-      Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
-      Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
+      Assertions.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
+      Assertions.assertEquals(recoveryid, updatedInfo.getRecoveryID());
 
       //recover one more time 
       final long recoveryid2 = gs + 2;
@@ -267,14 +264,14 @@
       assertEquals(originalInfo, recoveryInfo2);
 
       final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b);
-      Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
-      Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
+      Assertions.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
+      Assertions.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
       
       //case RecoveryInProgressException
       try {
         FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
             DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
-        Assert.fail();
+        Assertions.fail();
       }
       catch(RecoveryInProgressException ripe) {
         System.out.println("GOOD: getting " + ripe);
@@ -287,7 +284,7 @@
       ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
           recoveryid,
           DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
-      Assert.assertNull("Data-node should not have this replica.", r);
+      Assertions.assertNull(r, "Data-node should not have this replica.");
     }
     
     { // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs  
@@ -296,7 +293,7 @@
       try {
         FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
             DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
-        Assert.fail();
+        Assertions.fail();
       }
       catch(IOException ioe) {
         System.out.println("GOOD: getting " + ioe);
@@ -341,11 +338,11 @@
       final LocatedBlock locatedblock = getLastLocatedBlock(
           DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
       final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
-      Assert.assertTrue(datanodeinfo.length > 0);
+      Assertions.assertTrue(datanodeinfo.length > 0);
 
       //get DataNode and FSDataset objects
       final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
-      Assert.assertTrue(datanode != null);
+      Assertions.assertTrue(datanode != null);
 
       //initReplicaRecovery
       final ExtendedBlock b = locatedblock.getBlock();
@@ -358,7 +355,7 @@
       //check replica
       final Replica replica =
           cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
-      Assert.assertEquals(ReplicaState.RUR, replica.getState());
+      Assertions.assertEquals(ReplicaState.RUR, replica.getState());
 
       //check meta data before update
       cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
@@ -373,7 +370,7 @@
           //update should fail
           fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
               tmp.getBlockId(), newlength);
-          Assert.fail();
+          Assertions.fail();
         } catch(IOException ioe) {
           System.out.println("GOOD: getting " + ioe);
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 14ed26e..74eba5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -23,8 +23,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ThreadUtil;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -35,10 +35,9 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
-
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestLazyPersistFiles extends LazyPersistTestCase {
   private static final int THREADPOOL_SIZE = 10;
@@ -182,7 +181,7 @@
       @Override
       public void run() {
         try {
-          Assert.assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
+          Assertions.assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
         } catch (Throwable e) {
           LOG.error("readerRunnable error", e);
           testFailed.set(true);
@@ -201,7 +200,7 @@
     for (int i = 0; i < NUM_TASKS; i++) {
       ThreadUtil.joinUninterruptibly(threads[i]);
     }
-    Assert.assertFalse(testFailed.get());
+    Assertions.assertFalse(testFailed.get());
   }
 
   /**
@@ -313,7 +312,7 @@
       // asyncLazyPersistService is already shutdown.
       // If we do not release references, the number of
       // references will increase infinitely.
-      Assert.assertTrue(
+      Assertions.assertTrue(
           beforeCnts[i] == afterCnt || beforeCnts[i] == (afterCnt - 1));
     }
   }
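The assertThat change above replaces org.junit.Assert.assertThat, which no longer exists in JUnit 5, with Hamcrest's own org.hamcrest.MatcherAssert.assertThat. A minimal sketch of the resulting call site, using a hypothetical local variable in place of the test's verifyDeletedBlocks(...) result:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.core.Is.is;

    boolean deleted = true;  // hypothetical stand-in for verifyDeletedBlocks(locatedBlocks)
    assertThat(deleted, is(true));  // matcher usage is unchanged; only the import moves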
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
index 2d54c48..63015e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
@@ -29,7 +29,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -39,8 +39,8 @@
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
 
 /**
  * Verify that locked memory is used correctly when writing to replicas in
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
index 6b198f1..f5dea6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
@@ -22,13 +22,13 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.hamcrest.core.IsNot.not;
-import static org.junit.Assert.assertThat;
 
 
 public class TestLazyPersistPolicy extends LazyPersistTestCase {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
index b6413ec..7cde96f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.concurrent.TimeoutException;
@@ -32,7 +32,7 @@
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.MatcherAssert.assertThat;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
index 5fa470c..6b1ccbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
@@ -27,14 +27,14 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
   @Test
@@ -61,8 +61,8 @@
     ensureFileReplicasOnStorageType(path1, RAM_DISK);
 
     LOG.info("Restarting the DataNode");
-    assertTrue("DN did not restart properly",
-        cluster.restartDataNode(0, true));
+    assertTrue(cluster.restartDataNode(0, true),
+        "DN did not restart properly");
     // wait for blockreport
     waitForBlockReport(dn, dnd);
     // Ensure that the replica is now on persistent storage.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
index 56cc41e..8c67f74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
@@ -23,8 +23,8 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -33,9 +33,9 @@
 
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestLazyWriter extends LazyPersistTestCase {
   @Test
@@ -192,7 +192,7 @@
         ensureFileReplicasOnStorageType(path, RAM_DISK);
     // Delete before persist
     client.delete(path.toString(), false);
-    Assert.assertFalse(fs.exists(path));
+    Assertions.assertFalse(fs.exists(path));
 
     assertThat(verifyDeletedBlocks(locatedBlocks), is(true));
 
@@ -218,7 +218,7 @@
 
     // Delete after persist
     client.delete(path.toString(), false);
-    Assert.assertFalse(fs.exists(path));
+    Assertions.assertFalse(fs.exists(path));
 
     assertThat(verifyDeletedBlocks(locatedBlocks), is(true));
     verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java
index d3232c8..c5e4d1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestPmemCacheRecovery.java
@@ -23,10 +23,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -54,12 +51,12 @@
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.util.function.Supplier;
@@ -101,7 +98,7 @@
         LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpClass() throws Exception {
     oldInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector.set(new DataNodeFaultInjector() {
@@ -117,12 +114,12 @@
     });
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownClass() throws Exception {
     DataNodeFaultInjector.set(oldInjector);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFS_DATANODE_PMEM_CACHE_RECOVERY_KEY, true);
@@ -152,7 +149,7 @@
     cacheManager = ((FsDatasetImpl) dn.getFSDataset()).cacheManager;
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -217,7 +214,7 @@
     final int cacheBlocksNum =
         Ints.checkedCast(CACHE_AMOUNT / BLOCK_SIZE);
     BlockReaderTestUtil.enableHdfsCachingTracing();
-    Assert.assertEquals(0, CACHE_AMOUNT % BLOCK_SIZE);
+    Assertions.assertEquals(0, CACHE_AMOUNT % BLOCK_SIZE);
 
     final Path testFile = new Path("/testFile");
     final long testFileLen = cacheBlocksNum * BLOCK_SIZE;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index afd8168..af0894e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -81,8 +78,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -311,7 +308,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     datanode = mock(DataNode.class);
     storage = mock(DataStorage.class);
@@ -496,20 +493,20 @@
     // all these blocks can belong to the provided volume
     int blocksFound = getBlocksInProvidedVolumes(providedBasePath + "/test1/",
         expectedBlocks, minId);
-    assertEquals(
-        "Number of blocks in provided volumes should be " + expectedBlocks,
-        expectedBlocks, blocksFound);
+    assertEquals(
+        expectedBlocks, blocksFound,
+        "Number of blocks in provided volumes should be " + expectedBlocks);
     blocksFound = getBlocksInProvidedVolumes(
         "file:/" + providedBasePath + "/test1/", expectedBlocks, minId);
-    assertEquals(
-        "Number of blocks in provided volumes should be " + expectedBlocks,
-        expectedBlocks, blocksFound);
+    assertEquals(
+        expectedBlocks, blocksFound,
+        "Number of blocks in provided volumes should be " + expectedBlocks);
     // use a path that is entirely different from the providedBasePath
     // none of these blocks can belong to the volume
     blocksFound =
         getBlocksInProvidedVolumes("randomtest1/", expectedBlocks, minId);
-    assertEquals("Number of blocks in provided volumes should be 0", 0,
-        blocksFound);
+    assertEquals(0, blocksFound,
+        "Number of blocks in provided volumes should be 0");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java
index d4382d2..5136736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java
@@ -31,10 +31,10 @@
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
@@ -44,7 +44,7 @@
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Unit test for ReplicaCachingGetSpaceUsed class.
@@ -55,7 +55,7 @@
   private DistributedFileSystem fs;
   private DataNode dataNode;
 
-  @Before
+  @BeforeEach
   public void setUp()
       throws IOException, NoSuchMethodException, InterruptedException {
     conf = new Configuration();
@@ -70,7 +70,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -170,7 +170,7 @@
         }
       } catch (IOException e) {
         modifyThread.setShouldRun(false);
-        Assert.fail("Encounter IOException when deep copy replica.");
+        Assertions.fail("Encounter IOException when deep copy replica.");
       }
     }
     modifyThread.setShouldRun(false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index 59203bb..61df4af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -36,7 +34,7 @@
   private final String bpid = "BP-TEST";
   private final  Block block = new Block(1234, 1234, 1234);
   
-  @Before
+  @BeforeEach
   public void setup() {
     map.add(bpid, new FinalizedReplica(block, null, null));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
index fa666f2..66ca336 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
@@ -20,8 +20,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
@@ -31,7 +31,7 @@
 import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAggressive;
 import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorConservative;
 import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.when;
 
 /**
@@ -43,7 +43,7 @@
   private DF usage;
   private ReservedSpaceCalculator reserved;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     conf = new Configuration();
     usage = Mockito.mock(DF.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
index efc203b..ab522f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
@@ -28,12 +28,12 @@
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.NativeCodeLoader;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 import java.io.File;
@@ -43,10 +43,9 @@
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test Lazy persist behavior with short-circuit reads. These tests
@@ -55,16 +54,16 @@
  */
 public class TestScrLazyPersistFiles extends LazyPersistTestCase {
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     DomainSocket.disableBindPathValidation();
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
+    Assumptions.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
     assumeNotWindows();
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
 
     final long osPageSize = NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
     Preconditions.checkState(BLOCK_SIZE >= osPageSize);
@@ -97,9 +96,9 @@
     try {
       byte[] buf = new byte[BUFFER_LENGTH];
       fis.read(0, buf, 0, BUFFER_LENGTH);
-      Assert.assertEquals(BUFFER_LENGTH,
+      Assertions.assertEquals(BUFFER_LENGTH,
         fis.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BUFFER_LENGTH,
+      Assertions.assertEquals(BUFFER_LENGTH,
         fis.getReadStatistics().getTotalShortCircuitBytesRead());
     } finally {
       fis.close();
@@ -162,7 +161,7 @@
     // subsequent legacy short-circuit reads in the ClientContext.
     // Assert that it didn't get disabled.
     ClientContext clientContext = client.getClientContext();
-    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
+    Assertions.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
   }
 
   private void doShortCircuitReadAfterEvictionTest() throws IOException,
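In the before() method above, JUnit 4 Assume calls become JUnit 5 Assumptions so that unmet preconditions still skip the test instead of failing it. A minimal sketch of that pattern, assuming the Hadoop classes already referenced in this file:

    import org.apache.hadoop.net.unix.DomainSocket;
    import org.apache.hadoop.util.NativeCodeLoader;
    import org.junit.jupiter.api.Assumptions;

    // Skip (rather than fail) when native code or domain sockets are unavailable.
    Assumptions.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);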
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
index a702cec..52ce8bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
@@ -25,11 +25,11 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -46,10 +46,10 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Daemon;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 import org.slf4j.event.Level;
@@ -87,7 +87,7 @@
 
   private static Random rand = new Random();
 
-  @Before
+  @BeforeEach
   public void before() {
     conf = new HdfsConfiguration();
   }
@@ -135,7 +135,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (singletonVolumeRef != null) {
       singletonVolumeRef.close();
@@ -337,10 +337,10 @@
       // Exception can be ignored (expected)
     }
 
-    // Ensure RBW space reserved is released
-    assertTrue(
-        "Expected ZERO but got " + fsVolumeImpl.getReservedForReplicas(),
-        fsVolumeImpl.getReservedForReplicas() == 0);
+    // Ensure RBW space reserved is released
+    assertTrue(fsVolumeImpl.getReservedForReplicas() == 0,
+        "Expected ZERO but got "
+            + fsVolumeImpl.getReservedForReplicas());
 
     // Reserve some bytes to verify double clearing space should't happen
     fsVolumeImpl.reserveSpaceForReplica(1000);
@@ -419,11 +419,11 @@
 
       performReReplication(file, true);
 
-      assertEquals("Wrong reserve space for Tmp ", byteCount1,
-          fsVolumeImpl.getRecentReserved());
+      assertEquals(byteCount1, fsVolumeImpl.getRecentReserved(),
+          "Wrong reserve space for Tmp ");
 
-      assertEquals("Reserved Tmp space is not released", 0,
-          fsVolumeImpl.getReservedForReplicas());
+      assertEquals(0, fsVolumeImpl.getReservedForReplicas(),
+          "Reserved Tmp space is not released");
     }
 
     // Test when file creation fails
@@ -464,11 +464,11 @@
 
       performReReplication(file, false);
 
-      assertEquals("Wrong reserve space for Tmp ", byteCount2,
-          fsVolumeImpl.getRecentReserved());
+      assertEquals(byteCount2, fsVolumeImpl.getRecentReserved(),
+          "Wrong reserve space for Tmp ");
 
-      assertEquals("Tmp space is not released OR released twice", 1000,
-          fsVolumeImpl.getReservedForReplicas());
+      assertEquals(1000, fsVolumeImpl.getReservedForReplicas(),
+          "Tmp space is not released OR released twice");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index e939389..f76f25f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -49,8 +47,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /** Test if FSDataset#append, writeToRbw, and writeToTmp */
 public class TestWriteToReplica {
@@ -188,9 +186,9 @@
         fvi.onBlockFileDeletion(bpid, -available);
         blocks[FINALIZED].setNumBytes(expectedLen + 100);
         dataSet.append(blocks[FINALIZED], newGS, expectedLen);
-        Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
+        Assertions.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
       } catch (DiskOutOfSpaceException e) {
-        Assert.assertTrue(e.getMessage().startsWith(
+        Assertions.assertTrue(e.getMessage().startsWith(
             "Insufficient space for appending to "));
       }
       fvi.onBlockFileDeletion(bpid, available);
@@ -205,37 +203,37 @@
     try {
       dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, 
           blocks[TEMPORARY].getNumBytes());
-      Assert.fail("Should not have appended to a temporary replica " 
+      Assertions.fail("Should not have appended to a temporary replica " 
           + blocks[TEMPORARY]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
+      Assertions.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
           blocks[TEMPORARY], e.getMessage());
     }
 
     try {
       dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp()+1,
           blocks[RBW].getNumBytes());
-      Assert.fail("Should not have appended to an RBW replica" + blocks[RBW]);
+      Assertions.fail("Should not have appended to an RBW replica" + blocks[RBW]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
+      Assertions.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
           blocks[RBW], e.getMessage());
     }
 
     try {
       dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
           blocks[RBW].getNumBytes());
-      Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]);
+      Assertions.fail("Should not have appended to an RWR replica" + blocks[RWR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
+      Assertions.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
           blocks[RWR], e.getMessage());
     }
 
     try {
       dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
           blocks[RUR].getNumBytes());
-      Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
+      Assertions.fail("Should not have appended to an RUR replica" + blocks[RUR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
+      Assertions.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
           blocks[RUR], e.getMessage());
     }
 
@@ -243,10 +241,10 @@
       dataSet.append(blocks[NON_EXISTENT], 
           blocks[NON_EXISTENT].getGenerationStamp(), 
           blocks[NON_EXISTENT].getNumBytes());
-      Assert.fail("Should not have appended to a non-existent replica " + 
+      Assertions.fail("Should not have appended to a non-existent replica " + 
           blocks[NON_EXISTENT]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA + 
+      Assertions.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA + 
           blocks[NON_EXISTENT], e.getMessage());
     }
     
@@ -258,10 +256,10 @@
     try {
       dataSet.recoverAppend(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, 
           blocks[TEMPORARY].getNumBytes());
-      Assert.fail("Should not have appended to a temporary replica " 
+      Assertions.fail("Should not have appended to a temporary replica " 
           + blocks[TEMPORARY]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
     }
 
@@ -272,18 +270,18 @@
     try {
       dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
           blocks[RBW].getNumBytes());
-      Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]);
+      Assertions.fail("Should not have appended to an RWR replica" + blocks[RWR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
     }
 
     try {
       dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
           blocks[RUR].getNumBytes());
-      Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
+      Assertions.fail("Should not have appended to an RUR replica" + blocks[RUR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
     }
 
@@ -291,10 +289,10 @@
       dataSet.recoverAppend(blocks[NON_EXISTENT], 
           blocks[NON_EXISTENT].getGenerationStamp(), 
           blocks[NON_EXISTENT].getNumBytes());
-      Assert.fail("Should not have appended to a non-existent replica " + 
+      Assertions.fail("Should not have appended to a non-existent replica " + 
           blocks[NON_EXISTENT]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.NON_EXISTENT_REPLICA));
     }
   }
@@ -308,10 +306,10 @@
     try {
       dataSet.recoverClose(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1, 
           blocks[TEMPORARY].getNumBytes());
-      Assert.fail("Should not have recovered close a temporary replica " 
+      Assertions.fail("Should not have recovered close a temporary replica " 
           + blocks[TEMPORARY]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
     }
 
@@ -322,18 +320,18 @@
     try {
       dataSet.recoverClose(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
           blocks[RBW].getNumBytes());
-      Assert.fail("Should not have recovered close an RWR replica" + blocks[RWR]);
+      Assertions.fail("Should not have recovered close an RWR replica" + blocks[RWR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
     }
 
     try {
       dataSet.recoverClose(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
           blocks[RUR].getNumBytes());
-      Assert.fail("Should not have recovered close an RUR replica" + blocks[RUR]);
+      Assertions.fail("Should not have recovered close an RUR replica" + blocks[RUR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
     }
 
@@ -341,10 +339,10 @@
       dataSet.recoverClose(blocks[NON_EXISTENT], 
           blocks[NON_EXISTENT].getGenerationStamp(), 
           blocks[NON_EXISTENT].getNumBytes());
-      Assert.fail("Should not have recovered close a non-existent replica " + 
+      Assertions.fail("Should not have recovered close a non-existent replica " + 
           blocks[NON_EXISTENT]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.NON_EXISTENT_REPLICA));
     }
   }
@@ -354,16 +352,16 @@
       dataSet.recoverRbw(blocks[FINALIZED],
           blocks[FINALIZED].getGenerationStamp()+1,
           0L, blocks[FINALIZED].getNumBytes());
-      Assert.fail("Should not have recovered a finalized replica " +
+      Assertions.fail("Should not have recovered a finalized replica " +
           blocks[FINALIZED]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.NON_RBW_REPLICA));
     }
  
     try {
       dataSet.createRbw(StorageType.DEFAULT, null, blocks[FINALIZED], false);
-      Assert.fail("Should not have created a replica that's already " +
+      Assertions.fail("Should not have created a replica that's already " +
       		"finalized " + blocks[FINALIZED]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -372,16 +370,16 @@
       dataSet.recoverRbw(blocks[TEMPORARY], 
           blocks[TEMPORARY].getGenerationStamp()+1, 
           0L, blocks[TEMPORARY].getNumBytes());
-      Assert.fail("Should not have recovered a temporary replica " +
+      Assertions.fail("Should not have recovered a temporary replica " +
           blocks[TEMPORARY]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.NON_RBW_REPLICA));
     }
 
     try {
       dataSet.createRbw(StorageType.DEFAULT, null, blocks[TEMPORARY], false);
-      Assert.fail("Should not have created a replica that had created as " +
+      Assertions.fail("Should not have created a replica that had created as " +
       		"temporary " + blocks[TEMPORARY]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -391,7 +389,7 @@
     
     try {
       dataSet.createRbw(StorageType.DEFAULT, null, blocks[RBW], false);
-      Assert.fail("Should not have created a replica that had created as RBW " +
+      Assertions.fail("Should not have created a replica that had created as RBW " +
           blocks[RBW]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -399,15 +397,15 @@
     try {
       dataSet.recoverRbw(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
           0L, blocks[RWR].getNumBytes());
-      Assert.fail("Should not have recovered a RWR replica " + blocks[RWR]);
+      Assertions.fail("Should not have recovered a RWR replica " + blocks[RWR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.NON_RBW_REPLICA));
     }
 
     try {
       dataSet.createRbw(StorageType.DEFAULT, null, blocks[RWR], false);
-      Assert.fail("Should not have created a replica that was waiting to be " +
+      Assertions.fail("Should not have created a replica that was waiting to be " +
       		"recovered " + blocks[RWR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -415,15 +413,15 @@
     try {
       dataSet.recoverRbw(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
           0L, blocks[RUR].getNumBytes());
-      Assert.fail("Should not have recovered a RUR replica " + blocks[RUR]);
+      Assertions.fail("Should not have recovered a RUR replica " + blocks[RUR]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(e.getMessage().startsWith(
+      Assertions.assertTrue(e.getMessage().startsWith(
           ReplicaNotFoundException.NON_RBW_REPLICA));
     }
 
     try {
       dataSet.createRbw(StorageType.DEFAULT, null, blocks[RUR], false);
-      Assert.fail("Should not have created a replica that was under recovery " +
+      Assertions.fail("Should not have created a replica that was under recovery " +
           blocks[RUR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -432,10 +430,10 @@
       dataSet.recoverRbw(blocks[NON_EXISTENT],
           blocks[NON_EXISTENT].getGenerationStamp()+1,
           0L, blocks[NON_EXISTENT].getNumBytes());
-      Assert.fail("Cannot recover a non-existent replica " +
+      Assertions.fail("Cannot recover a non-existent replica " +
           blocks[NON_EXISTENT]);
     } catch (ReplicaNotFoundException e) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
     }
     
@@ -446,7 +444,7 @@
     try {
       dataSet.createTemporary(StorageType.DEFAULT, null, blocks[FINALIZED],
           false);
-      Assert.fail("Should not have created a temporary replica that was " +
+      Assertions.fail("Should not have created a temporary replica that was " +
       		"finalized " + blocks[FINALIZED]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -454,28 +452,28 @@
     try {
       dataSet.createTemporary(StorageType.DEFAULT, null, blocks[TEMPORARY],
           false);
-      Assert.fail("Should not have created a replica that had created as" +
+      Assertions.fail("Should not have created a replica that had created as" +
       		"temporary " + blocks[TEMPORARY]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
       dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RBW], false);
-      Assert.fail("Should not have created a replica that had created as RBW " +
+      Assertions.fail("Should not have created a replica that had created as RBW " +
           blocks[RBW]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
       dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RWR], false);
-      Assert.fail("Should not have created a replica that was waiting to be " +
+      Assertions.fail("Should not have created a replica that was waiting to be " +
       		"recovered " + blocks[RWR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
     
     try {
       dataSet.createTemporary(StorageType.DEFAULT, null, blocks[RUR], false);
-      Assert.fail("Should not have created a replica that was under recovery " +
+      Assertions.fail("Should not have created a replica that was under recovery " +
           blocks[RUR]);
     } catch (ReplicaAlreadyExistsException e) {
     }
@@ -486,12 +484,12 @@
     try {
       dataSet.createTemporary(StorageType.DEFAULT, null, blocks[NON_EXISTENT],
           false);
-      Assert.fail("Should not have created a replica that had already been "
+      Assertions.fail("Should not have created a replica that had already been "
           + "created " + blocks[NON_EXISTENT]);
     } catch (Exception e) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
-      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+      Assertions.assertTrue(e instanceof ReplicaAlreadyExistsException);
     }
 
     long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
@@ -500,11 +498,11 @@
       ReplicaInPipeline replicaInfo =
           dataSet.createTemporary(StorageType.DEFAULT, null,
               blocks[NON_EXISTENT], false).getReplica();
-      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
-      Assert.assertTrue(
+      Assertions.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assertions.assertTrue(
           replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
     } catch (ReplicaAlreadyExistsException e) {
-      Assert.fail("createTemporary should have allowed the block with newer "
+      Assertions.fail("createTemporary should have allowed the block with newer "
           + " generation stamp to be created " + blocks[NON_EXISTENT]);
     }
   }
@@ -525,8 +523,8 @@
       cluster.waitActive();
       NameNode nn1 = cluster.getNameNode(0);
       NameNode nn2 = cluster.getNameNode(1);
-      assertNotNull("cannot create nn1", nn1);
-      assertNotNull("cannot create nn2", nn2);
+      assertNotNull(nn1, "cannot create nn1");
+      assertNotNull(nn2, "cannot create nn2");
       
       // check number of volumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
@@ -535,8 +533,8 @@
       
       List<FsVolumeSpi> volumes = null;
       try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) {
-        // number of volumes should be 2 - [data1, data2]
-        assertEquals("number of volumes is wrong", 2, referredVols.size());
+        // number of volumes should be 2 - [data1, data2]
+        assertEquals(2, referredVols.size(), "number of volumes is wrong");
         volumes = new ArrayList<>(referredVols.size());
         for (FsVolumeSpi vol : referredVols) {
           volumes.add(vol);
@@ -545,9 +543,9 @@
       ArrayList<String> bpList = new ArrayList<>(Arrays.asList(
           cluster.getNamesystem(0).getBlockPoolId(),
           cluster.getNamesystem(1).getBlockPoolId()));
-      
-      Assert.assertTrue("Cluster should have 2 block pools", 
-          bpList.size() == 2);
+
+      Assertions.assertTrue(
+          bpList.size() == 2, "Cluster should have 2 block pools");
       
       createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
       ReplicaMap oldReplicaMap = new ReplicaMap(new ReentrantReadWriteLock());
@@ -591,7 +589,7 @@
     fsDataset.recoverRbw(blocks[RBW], blocks[RBW].getGenerationStamp(), 0L,
         rbw.getNumBytes());
     // after the recovery, on disk length should equal acknowledged length.
-    Assert.assertTrue(rbw.getBytesOnDisk() == rbw.getBytesAcked());
+    Assertions.assertTrue(rbw.getBytesOnDisk() == rbw.getBytesAcked());
 
     // reduce on disk length again; this time actually truncate the file to
     // simulate the data not being present
@@ -618,14 +616,14 @@
     // replicaInfo from oldReplicaMap.
     for (String bpid: bpidList) {
       for (ReplicaInfo info: newReplicaMap.replicas(bpid)) {
-        assertNotNull("Volume map before restart didn't contain the "
-            + "blockpool: " + bpid, oldReplicaMap.replicas(bpid));
+        assertNotNull(oldReplicaMap.replicas(bpid),
+            "Volume map before restart didn't contain the blockpool: " + bpid);
         
         ReplicaInfo oldReplicaInfo = oldReplicaMap.get(bpid, 
             info.getBlockId());
-        // Volume map after restart contains a blockpool id which 
-        assertNotNull("Old Replica Map didnt't contain block with blockId: " +
-            info.getBlockId(), oldReplicaInfo);
+        // Volume map after restart contains a blockpool id which
+        assertNotNull(oldReplicaInfo, "Old Replica Map didn't contain block with "
+            + "blockId: " + info.getBlockId());
         
         ReplicaState oldState = oldReplicaInfo.getState();
         // Since after restart, all the RWR, RBW and RUR blocks gets 
@@ -648,7 +646,7 @@
     for (String bpid: bpidList) {
       for (ReplicaInfo replicaInfo: oldReplicaMap.replicas(bpid)) {
         if (replicaInfo.getState() != ReplicaState.TEMPORARY) {
-          Assert.fail("After datanode restart we lost the block with blockId: "
+          Assertions.fail("After datanode restart we lost the block with blockId: "
               +  replicaInfo.getBlockId());
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java
index 1faddb3..b323ffb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java
@@ -23,9 +23,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.metrics2.lib.MetricsTestHelper;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -35,9 +35,9 @@
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
@@ -65,7 +65,7 @@
 
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setup() {
     GenericTestUtils.setLogLevel(DataNodePeerMetrics.LOG, Level.TRACE);
     GenericTestUtils.setLogLevel(OutlierDetector.LOG, Level.TRACE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java
index 8a771e4..97fcc99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java
@@ -23,9 +23,9 @@
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,7 +37,7 @@
 import java.util.Map;
 import java.util.Set;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Unit tests for {@link OutlierDetector}.
@@ -235,7 +235,7 @@
 
   private OutlierDetector slowNodeDetector;
 
-  @Before
+  @BeforeEach
   public void setup() {
     slowNodeDetector = new OutlierDetector(MIN_OUTLIER_DETECTION_PEERS,
         (long) LOW_THRESHOLD);
@@ -250,11 +250,11 @@
       LOG.info("Verifying set {}", entry.getKey());
       final Set<String> outliers =
           slowNodeDetector.getOutliers(entry.getKey()).keySet();
-      assertTrue(
-          "Running outlier detection on " + entry.getKey() +
-              " was expected to yield set " + entry.getValue() + ", but " +
-              " we got set " + outliers,
-          outliers.equals(entry.getValue()));
+      assertTrue(
+          outliers.equals(entry.getValue()),
+          "Running outlier detection on " + entry.getKey() +
+              " was expected to yield set " + entry.getValue() + ", but " +
+              " we got set " + outliers);
     }
   }
 
@@ -275,10 +275,10 @@
       final Double errorPercent =
           Math.abs(median - expectedMedian) * 100.0 / expectedMedian;
 
-      assertTrue(
-          "Set " + inputList + "; Expected median: " +
-              expectedMedian + ", got: " + median,
-          errorPercent < 0.001);
+      assertTrue(
+          errorPercent < 0.001,
+          "Set " + inputList + "; Expected median: " +
+              expectedMedian + ", got: " + median);
     }
   }
 
@@ -300,17 +300,17 @@
         final Double errorPercent =
             Math.abs(mad - expectedMad) * 100.0 / expectedMad;
 
-        assertTrue(
-            "Set " + entry.getKey() + "; Expected M.A.D.: " +
-                expectedMad + ", got: " + mad,
-            errorPercent < 0.001);
+        assertTrue(
+            errorPercent < 0.001,
+            "Set " + entry.getKey() + "; Expected M.A.D.: " +
+                expectedMad + ", got: " + mad);
       } else {
         // For an input list of size 1, the MAD should be 0.0.
         final Double epsilon = 0.000001; // Allow for some FP math error.
-        assertTrue(
-            "Set " + entry.getKey() + "; Expected M.A.D.: " +
-                expectedMad + ", got: " + mad,
-            mad < epsilon);
+        assertTrue(
+            mad < epsilon,
+            "Set " + entry.getKey() + "; Expected M.A.D.: " +
+                expectedMad + ", got: " + mad);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
index 62827a2..e3732fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestDatanodeHttpXFrame.java
@@ -23,10 +23,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.http.HttpServer2;
-import org.junit.After;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 import java.io.IOException;
@@ -43,7 +43,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @After
+  @AfterEach
   public void cleanUp() {
     if (cluster != null) {
       cluster.shutdown();
@@ -57,9 +57,9 @@
     cluster = createCluster(xFrameEnabled, null);
     HttpURLConnection conn = getConn(cluster);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-    Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
-        xfoHeader != null);
-    Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+    Assertions.assertTrue(
+        xfoHeader != null, "X-FRAME-OPTIONS is absent in the header");
+    Assertions.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
         .SAMEORIGIN.toString()));
   }
 
@@ -69,7 +69,7 @@
     cluster = createCluster(xFrameEnabled, null);
     HttpURLConnection conn = getConn(cluster);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-    Assert.assertTrue("unexpected X-FRAME-OPTION in header", xfoHeader == null);
+    Assertions.assertTrue(xfoHeader == null, "unexpected X-FRAME-OPTIONS in header");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java
index 031ac0a..617e2097 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/TestHostRestrictingAuthorizationFilterHandler.java
@@ -28,15 +28,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HostRestrictingAuthorizationFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestHostRestrictingAuthorizationFilterHandler {
 
@@ -55,12 +52,12 @@
         new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
             HttpMethod.GET,
             WebHdfsFileSystem.PATH_PREFIX + "/user/myName/fooFile?op=OPEN");
-    // we will send back an error so ensure our write returns false
-    assertFalse("Should get error back from handler for rejected request",
-        channel.writeInbound(httpRequest));
+    // we will send back an error so ensure our write returns false
+    assertFalse(channel.writeInbound(httpRequest),
+        "Should get error back from handler for rejected request");
     DefaultHttpResponse channelResponse =
         (DefaultHttpResponse) channel.outboundMessages().poll();
-    assertNotNull("Expected response to exist.", channelResponse);
+    assertNotNull(channelResponse, "Expected response to exist.");
     assertEquals(HttpResponseStatus.FORBIDDEN, channelResponse.getStatus());
     assertFalse(channel.isOpen());
   }
@@ -89,12 +86,12 @@
         new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
             HttpMethod.GET,
             WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN");
-    assertTrue("Should successfully accept request",
-        channel.writeInbound(allowedHttpRequest));
-    assertTrue("Should successfully accept request, second time",
-        channel.writeInbound(allowedHttpRequest2));
-    assertTrue("Should successfully accept request, third time",
-        channel.writeInbound(allowedHttpRequest3));
+    assertTrue(channel.writeInbound(allowedHttpRequest),
+        "Should successfully accept request");
+    assertTrue(channel.writeInbound(allowedHttpRequest2),
+        "Should successfully accept request, second time");
+    assertTrue(channel.writeInbound(allowedHttpRequest3),
+        "Should successfully accept request, third time");
   }
 
   /*
@@ -125,15 +122,15 @@
         new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
             HttpMethod.GET,
             WebHdfsFileSystem.PATH_PREFIX + "/allowed/file_three?op=OPEN");
-    assertTrue("Should successfully accept request",
-        channel1.writeInbound(allowedHttpRequest));
-    assertTrue("Should successfully accept request, second time",
-        channel2.writeInbound(allowedHttpRequest2));
+    assertTrue(channel1.writeInbound(allowedHttpRequest),
+        "Should successfully accept request");
+    assertTrue(channel2.writeInbound(allowedHttpRequest2),
+        "Should successfully accept request, second time");
 
     // verify closing one channel does not affect remaining channels
     channel1.close();
-    assertTrue("Should successfully accept request, third time",
-        channel3.writeInbound(allowedHttpRequest3));
+    assertTrue(channel3.writeInbound(allowedHttpRequest3),
+        "Should successfully accept request, third time");
   }
 
   /*
@@ -148,8 +145,8 @@
             HttpMethod.GET,
             WebHdfsFileSystem.PATH_PREFIX + "/user/myName/fooFile?op" +
                 "=GETFILECHECKSUM");
-    assertTrue("Should successfully accept request",
-        channel.writeInbound(httpRequest));
+    assertTrue(channel.writeInbound(httpRequest),
+        "Should successfully accept request");
   }
 
   /*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
index 98465dc..b0ac4a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestDataNodeUGIProvider.java
@@ -52,9 +52,9 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 public class TestDataNodeUGIProvider {
@@ -65,7 +65,7 @@
   private final int LENGTH = 512;
   private final static int EXPIRE_AFTER_ACCESS = 5*1000;
   private Configuration conf;
-  @Before
+  @BeforeEach
   public void setUp(){
     conf = WebHdfsTestUtil.createConf();
     conf.setInt(DFSConfigKeys.DFS_WEBHDFS_UGI_EXPIRE_AFTER_ACCESS_KEY,
@@ -107,22 +107,22 @@
     UserGroupInformation ugi11 = ugiProvider1.ugi();
     UserGroupInformation ugi12 = ugiProvider1.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs returned by the same token should be same",
-        ugi11, ugi12);
+    Assertions.assertEquals(
+        ugi11, ugi12,
+        "With UGI cache, two UGIs returned by the same token should be same");
 
     DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
         new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf));
     UserGroupInformation url21 = ugiProvider2.ugi();
     UserGroupInformation url22 = ugiProvider2.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs returned by the same token should be same",
-        url21, url22);
+    Assertions.assertEquals(
+        url21, url22,
+        "With UGI cache, two UGIs returned by the same token should be same");
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different token should not be same",
-        ugi11, url22);
+    Assertions.assertNotEquals(
+        ugi11, url22,
+        "With UGI cache, two UGIs for the different token should not be same");
 
     ugiProvider2.clearCache();
     awaitCacheEmptyDueToExpiration();
@@ -131,12 +131,12 @@
 
     String msg = "With cache eviction, two UGIs returned" +
     " by the same token should not be same";
-    Assert.assertNotEquals(msg, ugi11, ugi12);
-    Assert.assertNotEquals(msg, url21, url22);
+    Assertions.assertNotEquals(ugi11, ugi12, msg);
+    Assertions.assertNotEquals(url21, url22, msg);
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different token should not be same",
-        ugi11, url22);
+    Assertions.assertNotEquals(
+        ugi11, url22,
+        "With UGI cache, two UGIs for the different token should not be same");
   }
 
   @Test
@@ -158,22 +158,22 @@
     UserGroupInformation ugi11 = ugiProvider1.ugi();
     UserGroupInformation ugi12 = ugiProvider1.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs for the same user should be same", ugi11,
-        ugi12);
+    Assertions.assertEquals(
+        ugi11, ugi12,
+        "With UGI cache, two UGIs for the same user should be same");
 
     DataNodeUGIProvider ugiProvider2 = new DataNodeUGIProvider(
         new ParameterParser(new QueryStringDecoder(URI.create(uri2)), conf));
     UserGroupInformation url21 = ugiProvider2.ugi();
     UserGroupInformation url22 = ugiProvider2.ugi();
 
-    Assert.assertEquals(
-        "With UGI cache, two UGIs for the same user should be same", url21,
-        url22);
+    Assertions.assertEquals(
+        url21, url22,
+        "With UGI cache, two UGIs for the same user should be same");
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different user should not be same",
-        ugi11, url22);
+    Assertions.assertNotEquals(
+        ugi11, url22,
+        "With UGI cache, two UGIs for the different user should not be same");
 
     awaitCacheEmptyDueToExpiration();
     ugi12 = ugiProvider1.ugi();
@@ -181,12 +181,12 @@
 
     String msg = "With cache eviction, two UGIs returned by" +
     " the same user should not be same";
-    Assert.assertNotEquals(msg, ugi11, ugi12);
-    Assert.assertNotEquals(msg, url21, url22);
+    Assertions.assertNotEquals(ugi11, ugi12, msg);
+    Assertions.assertNotEquals(url21, url22, msg);
 
-    Assert.assertNotEquals(
-        "With UGI cache, two UGIs for the different user should not be same",
-        ugi11, url22);
+    Assertions.assertNotEquals(
+        ugi11, url22,
+        "With UGI cache, two UGIs for the different user should not be same");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
index 4040998..1212ef5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
@@ -27,10 +27,10 @@
 import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import io.netty.handler.codec.http.QueryStringDecoder;
 
@@ -52,7 +52,7 @@
       + DelegationParam.NAME + "=" + token.encodeToUrlString());
     ParameterParser testParser = new ParameterParser(decoder, conf);
     final Token<DelegationTokenIdentifier> tok2 = testParser.delegationToken();
-    Assert.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
+    Assertions.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
   }
 
   @Test
@@ -61,7 +61,7 @@
     QueryStringDecoder decoder = new QueryStringDecoder(
             WebHdfsHandler.WEBHDFS_PREFIX + "/test");
     ParameterParser testParser = new ParameterParser(decoder, conf);
-    Assert.assertNull(testParser.delegationToken());
+    Assertions.assertNull(testParser.delegationToken());
   }
 
   @Test
@@ -73,7 +73,7 @@
     QueryStringDecoder decoder = new QueryStringDecoder(
       WebHdfsHandler.WEBHDFS_PREFIX + ESCAPED_PATH);
     ParameterParser testParser = new ParameterParser(decoder, conf);
-    Assert.assertEquals(EXPECTED_PATH, testParser.path());
+    Assertions.assertEquals(EXPECTED_PATH, testParser.path());
   }
 
   @Test
@@ -86,7 +86,7 @@
     EnumSet<CreateFlag> actual = testParser.createFlag();
     EnumSet<CreateFlag> expected = EnumSet.of(CreateFlag.APPEND,
         CreateFlag.SYNC_BLOCK);
-    Assert.assertEquals(expected.toString(), actual.toString());
+    Assertions.assertEquals(expected.toString(), actual.toString());
 
 
     final String path1 = "/test1?createflag=append";
@@ -96,14 +96,14 @@
 
     actual = testParser.createFlag();
     expected = EnumSet.of(CreateFlag.APPEND);
-    Assert.assertEquals(expected, actual);
+    Assertions.assertEquals(expected, actual);
 
     final String path2 = "/test1";
     decoder = new QueryStringDecoder(
         WebHdfsHandler.WEBHDFS_PREFIX + path2);
     testParser = new ParameterParser(decoder, conf);
     actual = testParser.createFlag();
-    Assert.assertEquals(0, actual.size());
+    Assertions.assertEquals(0, actual.size());
 
     final String path3 = "/test1?createflag=create,overwrite";
     decoder = new QueryStringDecoder(
@@ -112,7 +112,7 @@
     actual = testParser.createFlag();
     expected = EnumSet.of(CreateFlag.CREATE, CreateFlag
         .OVERWRITE);
-    Assert.assertEquals(expected.toString(), actual.toString());
+    Assertions.assertEquals(expected.toString(), actual.toString());
 
 
     final String path4 = "/test1?createflag=";
@@ -120,7 +120,7 @@
         WebHdfsHandler.WEBHDFS_PREFIX + path4);
     testParser = new ParameterParser(decoder, conf);
     actual = testParser.createFlag();
-    Assert.assertEquals(0, actual.size());
+    Assertions.assertEquals(0, actual.size());
 
     //Incorrect value passed to createflag
     try {
@@ -156,14 +156,14 @@
     final long X = 42;
 
     long offset = new OffsetParam(Long.toString(X)).getOffset();
-    Assert.assertEquals("OffsetParam: ", X, offset);
+    Assertions.assertEquals(X, offset, "OffsetParam: ");
 
     offset = new OffsetParam((String) null).getOffset();
-    Assert.assertEquals("OffsetParam with null should have defaulted to 0", 0, offset);
+    Assertions.assertEquals(0, offset, "OffsetParam with null should have defaulted to 0");
 
     try {
       offset = new OffsetParam("abc").getValue();
-      Assert.fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
+      Assertions.fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
     } catch (IllegalArgumentException iae) {
       // Ignore
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
index 2a496fb..de932e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
@@ -44,8 +44,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestConnectors.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestConnectors.java
index d7bdbc5..07fe617e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestConnectors.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestConnectors.java
@@ -23,10 +23,10 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
@@ -39,14 +39,14 @@
   private final int volumeCount = 2; // default volumes in MiniDFSCluster.
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numDatanodes).build();
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -61,10 +61,10 @@
     DiskBalancerCluster diskBalancerCluster =
         new DiskBalancerCluster(nameNodeConnector);
     diskBalancerCluster.readClusterInfo();
-    Assert.assertEquals("Expected number of Datanodes not found.",
-        numDatanodes, diskBalancerCluster.getNodes().size());
-    Assert.assertEquals("Expected number of volumes not found.",
-        volumeCount, diskBalancerCluster.getNodes().get(0).getVolumeCount());
+    Assertions.assertEquals(numDatanodes, diskBalancerCluster.getNodes().size(),
+        "Expected number of Datanodes not found.");
+    Assertions.assertEquals(volumeCount, diskBalancerCluster.getNodes().get(0)
+        .getVolumeCount(), "Expected number of volumes not found.");
   }
 
   @Test
@@ -78,8 +78,8 @@
     String diskBalancerJson = diskBalancerCluster.toJson();
     DiskBalancerCluster serializedCluster =
         DiskBalancerCluster.parseJson(diskBalancerJson);
-    Assert.assertEquals("Parsed cluster is not equal to persisted info.",
-        diskBalancerCluster.getNodes().size(),
-        serializedCluster.getNodes().size());
+    Assertions.assertEquals(diskBalancerCluster.getNodes().size(),
+        serializedCluster.getNodes().size(),
+        "Parsed cluster is not equal to persisted info.");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index 12fbcf1..6bfedd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.util.Collections;
 import java.util.LinkedList;
@@ -38,14 +38,14 @@
   public void testCreateRandomVolume() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerVolume vol = util.createRandomVolume(StorageType.DISK);
-    Assert.assertNotNull(vol.getUuid());
-    Assert.assertNotNull(vol.getPath());
-    Assert.assertNotNull(vol.getStorageType());
-    Assert.assertFalse(vol.isFailed());
-    Assert.assertFalse(vol.isTransient());
-    Assert.assertTrue(vol.getCapacity() > 0);
-    Assert.assertTrue((vol.getCapacity() - vol.getReserved()) > 0);
-    Assert.assertTrue((vol.getReserved() + vol.getUsed()) < vol.getCapacity());
+    Assertions.assertNotNull(vol.getUuid());
+    Assertions.assertNotNull(vol.getPath());
+    Assertions.assertNotNull(vol.getStorageType());
+    Assertions.assertFalse(vol.isFailed());
+    Assertions.assertFalse(vol.isTransient());
+    Assertions.assertTrue(vol.getCapacity() > 0);
+    Assertions.assertTrue((vol.getCapacity() - vol.getReserved()) > 0);
+    Assertions.assertTrue((vol.getReserved() + vol.getUsed()) < vol.getCapacity());
   }
 
   @Test
@@ -53,8 +53,8 @@
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerVolumeSet vSet =
         util.createRandomVolumeSet(StorageType.SSD, 10);
-    Assert.assertEquals(10, vSet.getVolumeCount());
-    Assert.assertEquals(StorageType.SSD.toString(),
+    Assertions.assertEquals(10, vSet.getVolumeCount());
+    Assertions.assertEquals(StorageType.SSD.toString(),
         vSet.getVolumes().get(0).getStorageType());
 
   }
@@ -64,7 +64,7 @@
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node = util.createRandomDataNode(
         new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 10);
-    Assert.assertNotNull(node.getNodeDataDensity());
+    Assertions.assertNotNull(node.getNodeDataDensity());
   }
 
   @Test
@@ -86,11 +86,11 @@
 
     for (int x = 0; x < queueSize; x++) {
 
-      Assert.assertEquals(reverseList.get(x).getCapacity(),
+      Assertions.assertEquals(reverseList.get(x).getCapacity(),
           highList.get(x).getCapacity());
-      Assert.assertEquals(reverseList.get(x).getReserved(),
+      Assertions.assertEquals(reverseList.get(x).getReserved(),
           highList.get(x).getReserved());
-      Assert.assertEquals(reverseList.get(x).getUsed(),
+      Assertions.assertEquals(reverseList.get(x).getUsed(),
           highList.get(x).getUsed());
     }
   }
@@ -117,7 +117,7 @@
     node.addVolume(v2);
 
     for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
-      Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
+      Assertions.assertFalse(vsets.isBalancingNeeded(10.0f));
     }
   }
 
@@ -143,7 +143,7 @@
     node.addVolume(v2);
 
     for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
-      Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
+      Assertions.assertFalse(vsets.isBalancingNeeded(10.0f));
     }
   }
 
@@ -170,7 +170,7 @@
     node.addVolume(v2);
 
     for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
-      Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
+      Assertions.assertFalse(vsets.isBalancingNeeded(10.0f));
     }
   }
 
@@ -194,7 +194,7 @@
     node.addVolume(v2);
 
     for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
-      Assert.assertTrue(vsets.isBalancingNeeded(10.0f));
+      Assertions.assertTrue(vsets.isBalancingNeeded(10.0f));
     }
   }
 
@@ -206,7 +206,7 @@
     DiskBalancerVolume parsedVolume =
         DiskBalancerVolume.parseJson(originalString);
     String parsedString = parsedVolume.toJson();
-    Assert.assertEquals(originalString, parsedString);
+    Assertions.assertEquals(originalString, parsedString);
   }
 
   @Test
@@ -220,8 +220,8 @@
 
     DiskBalancerCluster newCluster =
         DiskBalancerCluster.parseJson(cluster.toJson());
-    Assert.assertEquals(cluster.getNodes(), newCluster.getNodes());
-    Assert
+    Assertions.assertEquals(cluster.getNodes(), newCluster.getNodes());
+    Assertions
         .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
 
@@ -233,11 +233,11 @@
     DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
     v1.setCapacity(DiskBalancerTestUtil.GB);
     v1.setUsed(2 * DiskBalancerTestUtil.GB);
-    Assert.assertEquals(v1.getUsed(),v1.getCapacity());
+    Assertions.assertEquals(v1.getUsed(),v1.getCapacity());
     // If usage is less than capacity, usage should be set to the real usage
     DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
     v2.setCapacity(2*DiskBalancerTestUtil.GB);
     v2.setUsed(DiskBalancerTestUtil.GB);
-    Assert.assertEquals(v1.getUsed(),DiskBalancerTestUtil.GB);
+    Assertions.assertEquals(v1.getUsed(),DiskBalancerTestUtil.GB);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index aa2c5a7..b5cde19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -48,8 +48,8 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -65,11 +65,9 @@
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 
@@ -275,9 +273,11 @@
       // Expect return sleep delay in Milliseconds. sleep value = bytesCopied /
       // (1024*1024*bandwidth in MB/milli) - timeUsed;
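+      // Worked example, assuming the bandwidth configured for this test is 10 MB/s
+      // (0.01 MB/ms): 20 MB / 0.01 = 2000 ms; 2000 - 1200 ms already used = 800 ms delay.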
       long val = diskBalancerMover.computeDelay(20 * 1024 * 1024, 1200, item);
-      Assert.assertEquals(val, (long) 800);
+      Assertions.assertEquals(val, (long) 800);
     } catch (Exception e) {
-      Assert.fail("Unexpected exception: " + e);
+      Assertions.fail("Unexpected exception: " + e);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -335,8 +333,8 @@
       dataMover.verifyAllVolumesHaveData(false);
     } finally {
       String logOut = logCapturer.getOutput();
-      Assert.assertTrue("Wrong log: " + logOut, logOut.contains(
-          "NextBlock call returned null. No valid block to copy."));
+      Assertions.assertTrue(logOut.contains(
+          "NextBlock call returned null. No valid block to copy."), "Wrong log: " + logOut);
       cluster.shutdown();
     }
   }
@@ -423,7 +421,7 @@
       dataMover.verifyAllVolumesHaveData(true);
       dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
     } catch (Exception e) {
-      Assert.fail("Unexpected exception: " + e);
+      Assertions.fail("Unexpected exception: " + e);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -733,7 +731,7 @@
             LOG.info("Removed disk!");
             removeDiskLatch.countDown();
           } catch (ReconfigurationException | InterruptedException e) {
-            Assert.fail("Unexpected error while reconfiguring: " + e);
+            Assertions.fail("Unexpected error while reconfiguring: " + e);
           }
         }
       };
@@ -757,8 +755,8 @@
         }
       }, 1000, 100000);
 
-      assertTrue("Disk balancer operation hit max errors!", errorCount.get() <=
-          DFSConfigKeys.DFS_DISK_BALANCER_MAX_DISK_ERRORS_DEFAULT);
+        assertTrue(errorCount.get() <=
+                DFSConfigKeys.DFS_DISK_BALANCER_MAX_DISK_ERRORS_DEFAULT, "Disk balancer operation hit max errors!");
       createWorkPlanLatch.await();
       removeDiskLatch.await();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
index 21c9a59..0eba79b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
@@ -37,12 +37,10 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -51,20 +49,18 @@
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.NO_PLAN;
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.PLAN_DONE;
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.PLAN_UNDER_PROGRESS;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test DiskBalancer RPC.
  */
 public class TestDiskBalancerRPC {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
 
   private static final String PLAN_FILE = "/system/current.plan.json";
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
@@ -72,7 +68,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -94,16 +90,20 @@
   public void testSubmitPlanWithInvalidHash() throws Exception {
     RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
     DataNode dataNode = rpcTestHelper.getDataNode();
-    String planHash = rpcTestHelper.getPlanHash();
-    char[] hashArray = planHash.toCharArray();
+    String planHashValid = rpcTestHelper.getPlanHash();
+    char[] hashArray = planHashValid.toCharArray();
     hashArray[0]++;
-    planHash = String.valueOf(hashArray);
+    final String planHash = String.valueOf(hashArray);
     int planVersion = rpcTestHelper.getPlanVersion();
     NodePlan plan = rpcTestHelper.getPlan();
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN_HASH));
-    dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
-        plan.toJson(), false);
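+    // JUnit 5 has no ExpectedException rule; assertThrows is used instead and
+    // returns the thrown exception so its DiskBalancer Result can be checked.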
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
+              plan.toJson(), false);
+        });
+    Assertions.assertEquals(thrown.getResult(), Result.INVALID_PLAN_HASH);
   }
 
   @Test
@@ -111,13 +109,14 @@
     RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
     DataNode dataNode = rpcTestHelper.getDataNode();
     String planHash = rpcTestHelper.getPlanHash();
-    int planVersion = rpcTestHelper.getPlanVersion();
-    planVersion++;
+    final int planVersion = rpcTestHelper.getPlanVersion() + 1;
     NodePlan plan = rpcTestHelper.getPlan();
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN_VERSION));
-    dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
-        plan.toJson(), false);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
+              plan.toJson(), false);
+        });
+    Assertions.assertEquals(thrown.getResult(), Result.INVALID_PLAN_VERSION);
   }
 
   @Test
@@ -127,10 +126,12 @@
     String planHash = rpcTestHelper.getPlanHash();
     int planVersion = rpcTestHelper.getPlanVersion();
     NodePlan plan = rpcTestHelper.getPlan();
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN));
-    dataNode.submitDiskBalancerPlan(planHash, planVersion, "", "",
-        false);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          dataNode.submitDiskBalancerPlan(planHash, planVersion, "", "",
+              false);
+        });
+    Assertions.assertEquals(thrown.getResult(), Result.INVALID_PLAN);
   }
 
   @Test
@@ -149,14 +150,16 @@
   public void testCancelNonExistentPlan() throws Exception {
     RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
     DataNode dataNode = rpcTestHelper.getDataNode();
-    String planHash = rpcTestHelper.getPlanHash();
-    char[] hashArray= planHash.toCharArray();
+    String planHashValid = rpcTestHelper.getPlanHash();
+    char[] hashArray= planHashValid.toCharArray();
     hashArray[0]++;
-    planHash = String.valueOf(hashArray);
+    final String planHash = String.valueOf(hashArray);
     NodePlan plan = rpcTestHelper.getPlan();
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(Result.NO_SUCH_PLAN));
-    dataNode.cancelDiskBalancePlan(planHash);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          dataNode.cancelDiskBalancePlan(planHash);
+        });
+    Assertions.assertEquals(thrown.getResult(), Result.NO_SUCH_PLAN);
   }
 
   @Test
@@ -165,9 +168,11 @@
     DataNode dataNode = rpcTestHelper.getDataNode();
     String planHash = "";
     NodePlan plan = rpcTestHelper.getPlan();
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(Result.NO_SUCH_PLAN));
-    dataNode.cancelDiskBalancePlan(planHash);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          dataNode.cancelDiskBalancePlan(planHash);
+        });
+    Assertions.assertEquals(thrown.getResult(), Result.NO_SUCH_PLAN);
   }
 
   @Test
@@ -176,14 +181,14 @@
     DataNode dataNode = cluster.getDataNodes().get(dnIndex);
     String volumeNameJson = dataNode.getDiskBalancerSetting(
         DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
-    Assert.assertNotNull(volumeNameJson);
+    Assertions.assertNotNull(volumeNameJson);
     ObjectMapper mapper = new ObjectMapper();
 
     @SuppressWarnings("unchecked")
     Map<String, String> volumemap =
         mapper.readValue(volumeNameJson, HashMap.class);
 
-    Assert.assertEquals(2, volumemap.size());
+    Assertions.assertEquals(2, volumemap.size());
   }
 
   @Test
@@ -191,9 +196,11 @@
     final int dnIndex = 0;
     final String invalidSetting = "invalidSetting";
     DataNode dataNode = cluster.getDataNodes().get(dnIndex);
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(Result.UNKNOWN_KEY));
-    dataNode.getDiskBalancerSetting(invalidSetting);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          dataNode.getDiskBalancerSetting(invalidSetting);
+        });
+    Assertions.assertEquals(thrown.getResult(), Result.UNKNOWN_KEY);
   }
 
   @Test
@@ -209,7 +216,7 @@
     String bandwidthString = dataNode.getDiskBalancerSetting(
         DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
     long value = Long.decode(bandwidthString);
-    Assert.assertEquals(10L, value);
+    Assertions.assertEquals(10L, value);
   }
 
   @Test
@@ -223,7 +230,7 @@
     dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
         plan.toJson(), false);
     DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
-    Assert.assertTrue(status.getResult() == PLAN_UNDER_PROGRESS ||
+    Assertions.assertTrue(status.getResult() == PLAN_UNDER_PROGRESS ||
         status.getResult() == PLAN_DONE);
   }
 
@@ -233,7 +240,7 @@
     DataNode dataNode = rpcTestHelper.getDataNode();
 
     DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
-    Assert.assertTrue(status.getResult() == NO_PLAN);
+    Assertions.assertTrue(status.getResult() == NO_PLAN);
   }
 
   @Test
@@ -307,7 +314,7 @@
       DiskBalancerCluster diskBalancerCluster =
           new DiskBalancerCluster(nameNodeConnector);
       diskBalancerCluster.readClusterInfo();
-      Assert.assertEquals(cluster.getDataNodes().size(),
+      Assertions.assertEquals(cluster.getDataNodes().size(),
           diskBalancerCluster.getNodes().size());
       diskBalancerCluster.setNodesToProcess(diskBalancerCluster.getNodes());
       dataNode = cluster.getDataNodes().get(dnIndex);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
index e9c74ae..ab0fe44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import java.util.function.Supplier;
 import org.apache.commons.codec.digest.DigestUtils;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,11 +44,9 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.net.URI;
@@ -55,9 +54,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.NO_PLAN;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests diskbalancer with a mock mover.
@@ -66,9 +63,6 @@
   static final Logger LOG =
       LoggerFactory.getLogger(TestDiskBalancerWithMockMover.class);
 
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
   private static final String PLAN_FILE = "/system/current.plan.json";
   private MiniDFSCluster cluster;
   private String sourceName;
@@ -94,11 +88,11 @@
         .setMover(blockMover)
         .build();
 
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.DISK_BALANCER_NOT_ENABLED));
-
-    balancer.queryWorkStatus();
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class,
+            balancer::queryWorkStatus);
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.DISK_BALANCER_NOT_ENABLED);
   }
 
   /**
@@ -149,10 +143,12 @@
     // ask block mover to get stuck in copy block
     mockMoverHelper.getBlockMover().setSleep();
     executeSubmitPlan(plan, balancer);
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.PLAN_ALREADY_IN_PROGRESS));
-    executeSubmitPlan(plan, balancer);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          executeSubmitPlan(plan, balancer);
+        });
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS);
 
     // Not needed but this is the cleanup step.
     mockMoverHelper.getBlockMover().clearSleep();
@@ -189,10 +185,12 @@
     DiskBalancer balancer = mockMoverHelper.getBalancer();
 
     plan.setTimeStamp(Time.now() - (32 * millisecondInAnHour));
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.OLD_PLAN_SUBMITTED));
-    executeSubmitPlan(plan, balancer);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          executeSubmitPlan(plan, balancer);
+        });
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.OLD_PLAN_SUBMITTED);
   }
 
   @Test
@@ -201,12 +199,13 @@
     NodePlan plan = mockMoverHelper.getPlan();
     DiskBalancer balancer = mockMoverHelper.getBalancer();
 
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.INVALID_PLAN_VERSION));
-
-    // Plan version is invalid -- there is no version 0.
-    executeSubmitPlan(plan, balancer, 0);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          // Plan version is invalid -- there is no version 0.
+          executeSubmitPlan(plan, balancer, 0);
+        });
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.INVALID_PLAN_VERSION);
   }
 
   @Test
@@ -217,11 +216,12 @@
     String planJson = plan.toJson();
     String planID = DigestUtils.sha1Hex(planJson);
 
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.INVALID_PLAN));
-
-    balancer.submitPlan(planID, 1, "no-plan-file.json", null, false);
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          balancer.submitPlan(planID, 1, "no-plan-file.json", null, false);
+        });
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.INVALID_PLAN);
   }
 
   @Test
@@ -233,15 +233,16 @@
 
     String planJson = plan.toJson();
     String planID = DigestUtils.sha1Hex(planJson);
-    char repChar = planID.charAt(0);
-    repChar++;
+    char repCharValid = planID.charAt(0);
+    final char repChar = ++repCharValid;
 
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.INVALID_PLAN_HASH));
-    balancer.submitPlan(planID.replace(planID.charAt(0), repChar),
-        1, PLAN_FILE, planJson, false);
-
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          balancer.submitPlan(planID.replace(planID.charAt(0), repChar),
+              1, PLAN_FILE, planJson, false);
+        });
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.INVALID_PLAN_HASH);
   }
 
   /**
@@ -273,12 +274,14 @@
     executeSubmitPlan(plan, balancer);
 
     // Send a Wrong cancellation request.
-    char first = planID.charAt(0);
-    first++;
-    thrown.expect(DiskBalancerException.class);
-    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
-        .Result.NO_SUCH_PLAN));
-    balancer.cancelPlan(planID.replace(planID.charAt(0), first));
+    char firstValid = planID.charAt(0);
+    final char first = ++firstValid;
+    final DiskBalancerException thrown =
+        Assertions.assertThrows(DiskBalancerException.class, () -> {
+          balancer.cancelPlan(planID.replace(planID.charAt(0), first));
+        });
+    Assertions.assertEquals(thrown.getResult(),
+        DiskBalancerException.Result.NO_SUCH_PLAN);
 
     // Now cancel the real one
     balancer.cancelPlan(planID);
@@ -318,7 +321,7 @@
   }
 
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
     final int numStoragesPerDn = 2;
@@ -339,7 +342,7 @@
     references.close();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
index c722df7..d164408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
@@ -27,8 +27,8 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,9 +36,7 @@
 import java.util.List;
 import java.util.UUID;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test Planner.
@@ -55,7 +53,7 @@
         null);
     DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
     cluster.readClusterInfo();
-    Assert.assertEquals(3, cluster.getNodes().size());
+    Assertions.assertEquals(3, cluster.getNodes().size());
     cluster.setNodesToProcess(cluster.getNodes());
     DiskBalancerDataNode node = cluster.getNodes().get(0);
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
@@ -72,10 +70,10 @@
         null);
     DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
     cluster.readClusterInfo();
-    Assert.assertEquals(3, cluster.getNodes().size());
+    Assertions.assertEquals(3, cluster.getNodes().size());
     cluster.setNodesToProcess(cluster.getNodes());
     List<NodePlan> plan = cluster.computePlan(10.0f);
-    Assert.assertNotNull(plan);
+    Assertions.assertNotNull(plan);
   }
 
   private DiskBalancerVolume createVolume(String path, int capacityInGB,
@@ -115,7 +113,7 @@
     node.addVolume(volume30);
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeName(),
@@ -142,7 +140,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(5.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeUUID(),
@@ -183,7 +181,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(5.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeUUID(),
@@ -219,7 +217,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeName(),
@@ -250,7 +248,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeName(),
@@ -289,7 +287,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeName(),
@@ -348,7 +346,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner newPlanner = new GreedyPlanner(01.0f, node);
     NodePlan newPlan = new NodePlan(node.getDataNodeName(), node
@@ -421,12 +419,12 @@
     newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"),
         newPlan);
 
-    // Assuming that our random disks at least generated one step
-    assertTrue("No Steps Generated from random disks, very unlikely",
-        newPlan.getVolumeSetPlans().size() > 0);
+    // Assuming that our random disks at least generated one step
+    assertTrue(newPlan.getVolumeSetPlans().size() > 0,
+        "No Steps Generated from random disks, very unlikely");
 
-    assertTrue("Steps Generated less than disk count - false",
-        newPlan.getVolumeSetPlans().size() < diskCount);
+    assertTrue(newPlan.getVolumeSetPlans().size() < diskCount,
+        "Steps Generated less than disk count - false");
     LOG.info("Number of steps are : %d%n", newPlan.getVolumeSetPlans().size());
 
   }
@@ -501,7 +499,7 @@
 
     nullConnector.addNode(node);
     cluster.readClusterInfo();
-    Assert.assertEquals(1, cluster.getNodes().size());
+    Assertions.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(1.0f, node);
     NodePlan plan = new NodePlan(node.getDataNodeName(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 1d6331d..31c1d47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -31,9 +31,9 @@
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -66,10 +66,10 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 
 /**
@@ -88,7 +88,7 @@
   private final static long CAPCACITY = 300 * 1024;
   private final static long[] CAPACITIES = new long[] {CAPCACITY, CAPCACITY};
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
@@ -99,7 +99,7 @@
         "/diskBalancer/data-cluster-64node-3disk.json").toURI();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       // Just make sure we can shutdown datanodes.
@@ -372,11 +372,11 @@
     /* get path of plan file*/
     final String planFileName = dn.getDatanodeUuid();
 
-    /* verify plan command */
-    assertEquals(
-        "There must be two lines: the 1st is writing plan to...,"
-            + " the 2nd is actual full path of plan file.",
-        2, outputs.size());
+    /* verify plan command */
+    assertEquals(
+        2, outputs.size(),
+        "There must be two lines: the 1st is writing plan to...,"
+            + " the 2nd is actual full path of plan file.");
     assertThat(outputs.get(1), containsString(planFileName));
 
     /* get full path of plan file*/
@@ -758,11 +758,11 @@
           parent,
           miniCluster.getDataNodes().get(0).getDatanodeUuid()).toString();
 
-      /* verify the path of plan */
-      assertEquals(
-          "There must be two lines: the 1st is writing plan to,"
-              + " the 2nd is actual full path of plan file.",
-          2, outputs.size());
+      /* verify the path of plan */
+      assertEquals(
+          2, outputs.size(),
+          "There must be two lines: the 1st is writing plan to,"
+              + " the 2nd is actual full path of plan file.");
       assertThat(outputs.get(0), containsString("Writing plan to"));
       assertThat(outputs.get(1), containsString(planFileFullName));
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 3cec739..d8fb004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -36,8 +36,8 @@
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 import java.io.File;
 import java.io.IOException;
@@ -98,8 +98,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -129,7 +129,7 @@
 
   static Mover newMover(Configuration conf) throws IOException {
     final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-    Assert.assertEquals(1, namenodes.size());
+    Assertions.assertEquals(1, namenodes.size());
     Map<URI, List<Path>> nnMap = Maps.newHashMap();
     for (URI nn : namenodes) {
       nnMap.put(nn, null);
@@ -171,8 +171,8 @@
 
       final List<StorageType> storageTypes = new ArrayList<StorageType>(
           Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
-      Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
-      Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
+      Assertions.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
+      Assertions.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
     } finally {
       cluster.shutdown();
     }
@@ -212,13 +212,13 @@
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
       StorageType[] storageTypes = lb.getStorageTypes();
       for (StorageType storageType : storageTypes) {
-        Assert.assertTrue(StorageType.DISK == storageType);
+        Assertions.assertTrue(StorageType.DISK == storageType);
       }
       // move to ARCHIVE
       dfs.setStoragePolicy(dir, "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", dir.toString()});
-      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to ARCHIVE should be successful");
 
       // Wait till namenode notified about the block location details
       waitForLocatedBlockWithArchiveStorageType(dfs, file, sameNode ? 3 : 1);
@@ -276,14 +276,14 @@
     LocatedBlock lb = dfs1.getClient().getLocatedBlocks(file, 0).get(0);
     StorageType[] storageTypes = lb.getStorageTypes();
     for (StorageType storageType : storageTypes) {
-      Assert.assertTrue(StorageType.DISK == storageType);
+      Assertions.assertTrue(StorageType.DISK == storageType);
     }
 
     //verify before movement
     lb = dfs2.getClient().getLocatedBlocks(file, 0).get(0);
     storageTypes = lb.getStorageTypes();
     for (StorageType storageType : storageTypes) {
-      Assert.assertTrue(StorageType.ARCHIVE == storageType);
+      Assertions.assertTrue(StorageType.ARCHIVE == storageType);
     }
   }
 
@@ -344,14 +344,14 @@
       dfs1.setStoragePolicy(dir, "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", nn1 + dir.toString()});
-      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to ARCHIVE should be successful");
 
 
       //move to DISK
       dfs2.setStoragePolicy(dir, "HOT");
       rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", nn2 + dir.toString()});
-      Assert.assertEquals("Movement to DISK should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to DISK should be successful");
 
 
       // Wait till namenode notified about the block location details
@@ -396,7 +396,7 @@
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", nn1 + dir.toString(), nn2 + dir.toString()});
 
-      Assert.assertEquals("Movement to DISK should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to DISK should be successful");
 
       waitForLocatedBlockWithArchiveStorageType(dfs1, file, 3);
       waitForLocatedBlockWithDiskStorageType(dfs2, file, 3);
@@ -450,7 +450,7 @@
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", nn1 + dir.toString(), nn2 + dir.toString()});
 
-      Assert.assertEquals("Movement to DISK should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to DISK should be successful");
 
       waitForLocatedBlockWithArchiveStorageType(dfs1, file, 3);
       waitForLocatedBlockWithDiskStorageType(dfs2, file, 3);
@@ -510,9 +510,9 @@
   }
 
   private void checkMovePaths(List<Path> actual, Path... expected) {
-    Assert.assertEquals(expected.length, actual.size());
+    Assertions.assertEquals(expected.length, actual.size());
     for (Path p : expected) {
-      Assert.assertTrue(actual.contains(p));
+      Assertions.assertTrue(actual.contains(p));
     }
   }
 
@@ -531,24 +531,24 @@
       final Configuration conf = cluster.getConfiguration(0);
       try {
         Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
-        Assert.fail("Expected exception for illegal path bar");
+        Assertions.fail("Expected exception for illegal path bar");
       } catch (IllegalArgumentException e) {
         GenericTestUtils.assertExceptionContains("bar is not absolute", e);
       }
 
       Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-      Assert.assertEquals(1, namenodes.size());
-      Assert.assertEquals(1, movePaths.size());
+      Assertions.assertEquals(1, namenodes.size());
+      Assertions.assertEquals(1, movePaths.size());
       URI nn = namenodes.iterator().next();
-      Assert.assertTrue(movePaths.containsKey(nn));
-      Assert.assertNull(movePaths.get(nn));
+      Assertions.assertTrue(movePaths.containsKey(nn));
+      Assertions.assertNull(movePaths.get(nn));
 
       movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
       namenodes = DFSUtil.getInternalNsRpcUris(conf);
-      Assert.assertEquals(1, movePaths.size());
+      Assertions.assertEquals(1, movePaths.size());
       nn = namenodes.iterator().next();
-      Assert.assertTrue(movePaths.containsKey(nn));
+      Assertions.assertTrue(movePaths.containsKey(nn));
       checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
     } finally {
       cluster.shutdown();
@@ -569,11 +569,11 @@
       Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
           "-p", "/foo", "/bar");
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-      Assert.assertEquals(1, namenodes.size());
-      Assert.assertEquals(1, movePaths.size());
+      Assertions.assertEquals(1, namenodes.size());
+      Assertions.assertEquals(1, movePaths.size());
       URI nn = namenodes.iterator().next();
-      Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
-      Assert.assertTrue(movePaths.containsKey(nn));
+      Assertions.assertEquals(new URI("hdfs://MyCluster"), nn);
+      Assertions.assertTrue(movePaths.containsKey(nn));
       checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
     } finally {
       cluster.shutdown();
@@ -595,11 +595,11 @@
     DFSTestUtil.setFederatedConfiguration(cluster, conf);
     try {
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-      Assert.assertEquals(3, namenodes.size());
+      Assertions.assertEquals(3, namenodes.size());
 
       try {
         Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo");
-        Assert.fail("Expect exception for missing authority information");
+        Assertions.fail("Expect exception for missing authority information");
       } catch (IllegalArgumentException e) {
         GenericTestUtils.assertExceptionContains(
             "does not contain scheme and authority", e);
@@ -607,7 +607,7 @@
 
       try {
         Mover.Cli.getNameNodePathsToMove(conf, "-p", "hdfs:///foo");
-        Assert.fail("Expect exception for missing authority information");
+        Assertions.fail("Expect exception for missing authority information");
       } catch (IllegalArgumentException e) {
         GenericTestUtils.assertExceptionContains(
             "does not contain scheme and authority", e);
@@ -615,7 +615,7 @@
 
       try {
         Mover.Cli.getNameNodePathsToMove(conf, "-p", "wrong-hdfs://ns1/foo");
-        Assert.fail("Expect exception for wrong scheme");
+        Assertions.fail("Expect exception for wrong scheme");
       } catch (IllegalArgumentException e) {
         GenericTestUtils.assertExceptionContains("Cannot resolve the path", e);
       }
@@ -625,7 +625,7 @@
       URI nn2 = iter.next();
       Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
           "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar");
-      Assert.assertEquals(2, movePaths.size());
+      Assertions.assertEquals(2, movePaths.size());
       checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
       checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
     } finally {
@@ -648,7 +648,7 @@
     DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
     try {
       Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-      Assert.assertEquals(3, namenodes.size());
+      Assertions.assertEquals(3, namenodes.size());
 
       Iterator<URI> iter = namenodes.iterator();
       URI nn1 = iter.next();
@@ -656,7 +656,7 @@
       URI nn3 = iter.next();
       Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
           "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
-      Assert.assertEquals(3, movePaths.size());
+      Assertions.assertEquals(3, movePaths.size());
       checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
       checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
       checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
@@ -689,13 +689,13 @@
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
       StorageType[] storageTypes = lb.getStorageTypes();
       for (StorageType storageType : storageTypes) {
-        Assert.assertTrue(StorageType.DISK == storageType);
+        Assertions.assertTrue(StorageType.DISK == storageType);
       }
       // move to ARCHIVE
       dfs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", file.toString() });
-      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to ARCHIVE should be successful");
 
       // Wait till namenode notified about the block location details
       waitForLocatedBlockWithArchiveStorageType(dfs, file, 2);
@@ -729,7 +729,7 @@
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", file.toString() });
       int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
-      Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
+      Assertions.assertEquals(exitcode, rc, "Exit code should be " + exitcode);
     } finally {
       cluster.shutdown();
     }
@@ -762,7 +762,7 @@
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file.toString()});
       int exitcode = ExitStatus.IO_EXCEPTION.getExitCode();
-      Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
+      Assertions.assertEquals(exitcode, rc, "Exit code should be " + exitcode);
     } finally {
       cluster.shutdown();
     }
@@ -796,8 +796,8 @@
       dfs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file.toString()});
-      Assert.assertEquals("Movement should fail after some retry",
-          ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
+      Assertions.assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc,
+          "Movement should fail after some retry");
     } finally {
       cluster.shutdown();
     }
@@ -842,8 +842,8 @@
       fs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file});
-      Assert.assertEquals("Retcode expected to be ExitStatus.SUCCESS (0).",
-          ExitStatus.SUCCESS.getExitCode(), rc);
+      Assertions.assertEquals(ExitStatus.SUCCESS.getExitCode(), rc,
+          "Retcode expected to be ExitStatus.SUCCESS (0).");
     } finally {
       cluster.shutdown();
     }
@@ -927,7 +927,7 @@
           client.getBlockLocations(fooFile, 0, fileLen);
       for(LocatedBlock lb : locatedBlocks.getLocatedBlocks()){
         for( StorageType type : lb.getStorageTypes()){
-          Assert.assertEquals(StorageType.DISK, type);
+          Assertions.assertEquals(StorageType.DISK, type);
         }
       }
       StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
@@ -956,13 +956,13 @@
       // run Mover
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", barDir });
-      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to ARCHIVE should be successful");
 
       // verify storage types and locations
       locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
       for(LocatedBlock lb : locatedBlocks.getLocatedBlocks()){
         for( StorageType type : lb.getStorageTypes()){
-          Assert.assertEquals(StorageType.ARCHIVE, type);
+          Assertions.assertEquals(StorageType.ARCHIVE, type);
         }
       }
       StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
@@ -997,7 +997,7 @@
       locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
       for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
         for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.ARCHIVE, type);
+          Assertions.assertEquals(StorageType.ARCHIVE, type);
         }
       }
     }finally{
@@ -1009,7 +1009,7 @@
     String username = "mover";
     File baseDir = GenericTestUtils.getTestDir(TestMover.class.getSimpleName());
     FileUtil.fullyDelete(baseDir);
-    Assert.assertTrue(baseDir.mkdirs());
+    Assertions.assertTrue(baseDir.mkdirs());
 
     Properties kdcConf = MiniKdc.createConf();
     MiniKdc kdc = new MiniKdc(kdcConf, baseDir);
@@ -1019,8 +1019,8 @@
         UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
     KerberosName.resetDefaultRealm();
-    Assert.assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    Assertions.assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     keytabFile = new File(baseDir, username + ".keytab");
     String keytab = keytabFile.getAbsolutePath();
@@ -1078,7 +1078,7 @@
           // verify that mover runs Ok.
           testMovementWithLocalityOption(conf, true);
           // verify that UGI was logged in using keytab.
-          Assert.assertTrue(UserGroupInformation.isLoginKeytabBased());
+          Assertions.assertTrue(UserGroupInformation.isLoginKeytabBased());
           return null;
         }
       });
@@ -1123,7 +1123,7 @@
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
       StorageType[] storageTypes = lb.getStorageTypes();
       for (StorageType storageType : storageTypes) {
-        Assert.assertTrue(StorageType.DISK == storageType);
+        Assertions.assertTrue(StorageType.DISK == storageType);
       }
 
       // Adding one SSD based data node to the cluster.
@@ -1143,7 +1143,7 @@
           new String[] {"-p", dir.toString()});
 
       int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
-      Assert.assertEquals("Movement should fail", exitcode, rc);
+      Assertions.assertEquals(exitcode, rc, "Movement should fail");
 
     } finally {
       cluster.shutdown();
@@ -1182,8 +1182,8 @@
 
       // Delete block file so, block move will fail with FileNotFoundException
       LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
-      Assert.assertEquals("Wrong block count", 2,
-          locatedBlocks.locatedBlockCount());
+      Assertions.assertEquals(2, locatedBlocks.locatedBlockCount(),
+          "Wrong block count");
       LocatedBlock lb = locatedBlocks.get(0);
       cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
 
@@ -1191,8 +1191,8 @@
       dfs.setStoragePolicy(new Path(parenDir), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", parenDir.toString()});
-      Assert.assertEquals("Movement should fail after some retry",
-          ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
+      Assertions.assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc,
+          "Movement should fail after some retry");
     } finally {
       cluster.shutdown();
     }
@@ -1218,23 +1218,23 @@
       dfs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file.toString()});
-      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to ARCHIVE should be successful");
 
       // Wait till namenode notified about the block location details
       waitForLocatedBlockWithArchiveStorageType(dfs, file, 1);
 
       // verify before unset policy
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      Assert.assertTrue(StorageType.ARCHIVE == (lb.getStorageTypes())[0]);
+      Assertions.assertTrue(StorageType.ARCHIVE == (lb.getStorageTypes())[0]);
 
       // unset storage policy
       dfs.unsetStoragePolicy(new Path(file));
       rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] {"-p", file.toString()});
-      Assert.assertEquals("Movement to DISK should be successful", 0, rc);
+      Assertions.assertEquals(0, rc, "Movement to DISK should be successful");
 
       lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      Assert.assertTrue(StorageType.DISK == (lb.getStorageTypes())[0]);
+      Assertions.assertTrue(StorageType.DISK == (lb.getStorageTypes())[0]);
     } finally {
       cluster.shutdown();
     }
@@ -1316,8 +1316,8 @@
 
     // Mock FsDatasetSpi#getPinning to show that the block is pinned.
     LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
-    Assert.assertEquals("Wrong block count", 2,
-        locatedBlocks.locatedBlockCount());
+    Assertions.assertEquals(2, locatedBlocks.locatedBlockCount(),
+        "Wrong block count");
     LocatedBlock lb = locatedBlocks.get(0);
     DatanodeInfo datanodeInfo = lb.getLocations()[0];
     for (DataNode dn : cluster.getDataNodes()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index d95e76f..08bc450 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -62,9 +62,8 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.Assert;
-import org.junit.Test;
-
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 
@@ -275,7 +274,7 @@
         nnMap.put(nn, null);
       }
       int result = Mover.run(nnMap, conf);
-      Assert.assertEquals(expectedExitCode.getExitCode(), result);
+      Assertions.assertEquals(expectedExitCode.getExitCode(), result);
     }
 
     private void verifyNamespace() throws Exception {
@@ -309,7 +308,7 @@
           return;
         }
       }
-      Assert.fail("File " + file + " not found.");
+      Assertions.fail("File " + file + " not found.");
     }
 
     private void verifyFile(final Path parent, final HdfsFileStatus status,
@@ -318,17 +317,17 @@
       byte policyId = fileStatus.getStoragePolicy();
       BlockStoragePolicy policy = policies.getPolicy(policyId);
       if (expectedPolicyId != null) {
-        Assert.assertEquals((byte)expectedPolicyId, policy.getId());
+        Assertions.assertEquals((byte)expectedPolicyId, policy.getId());
       }
       final List<StorageType> types = policy.chooseStorageTypes(
           status.getReplication());
       for(LocatedBlock lb : fileStatus.getLocatedBlocks().getLocatedBlocks()) {
         final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
             lb.getStorageTypes());
-        Assert.assertTrue(fileStatus.getFullName(parent.toString())
-            + " with policy " + policy + " has non-empty overlap: " + diff
-            + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
-            diff.removeOverlap(true));
+        Assertions.assertTrue(diff.removeOverlap(true),
+            fileStatus.getFullName(parent.toString())
+                + " with policy " + policy + " has non-empty overlap: " + diff
+                + ", the corresponding block is " + lb.getBlock().getLocalBlock());
       }
     }
 
@@ -348,7 +347,7 @@
         throws IOException {
       final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
           file.toString(), 0).getLocatedBlocks();
-      Assert.assertEquals(1, lbs.size());
+      Assertions.assertEquals(1, lbs.size());
 
       LocatedBlock lb = lbs.get(0);
       StringBuilder types = new StringBuilder(); 
@@ -360,13 +359,13 @@
         } else if (t == StorageType.ARCHIVE) {
           r.archive++;
         } else {
-          Assert.fail("Unexpected storage type " + t);
+          Assertions.fail("Unexpected storage type " + t);
         }
       }
 
       if (expected != null) {
         final String s = "file = " + file + "\n  types = [" + types + "]";
-        Assert.assertEquals(s, expected, r);
+        Assertions.assertEquals(expected, r, s);
       }
       return r;
     }
@@ -520,7 +519,7 @@
       Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(test.conf,
           "-p", "/foo/bar", "/foo2");
       int result = Mover.run(map, test.conf);
-      Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
+      Assertions.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
 
       Thread.sleep(5000);
       test.verify(true);
@@ -563,8 +562,8 @@
           barFile.toString(), BLOCK_SIZE);
       LOG.info("Locations: " + lbs);
       List<LocatedBlock> blks = lbs.getLocatedBlocks();
-      Assert.assertEquals(1, blks.size());
-      Assert.assertEquals(1, blks.get(0).getLocations().length);
+      Assertions.assertEquals(1, blks.size());
+      Assertions.assertEquals(1, blks.get(0).getLocations().length);
 
       banner("finish the migration, continue writing");
       // make sure the writing can continue
@@ -576,8 +575,8 @@
           barFile.toString(), BLOCK_SIZE);
       LOG.info("Locations: " + lbs);
       blks = lbs.getLocatedBlocks();
-      Assert.assertEquals(1, blks.size());
-      Assert.assertEquals(1, blks.get(0).getLocations().length);
+      Assertions.assertEquals(1, blks.size());
+      Assertions.assertEquals(1, blks.get(0).getLocations().length);
 
       banner("finish writing, starting reading");
       // check the content of /foo/bar
@@ -586,7 +585,7 @@
       // read from offset 1024
       in.readFully(BLOCK_SIZE, buf, 0, buf.length);
       IOUtils.cleanupWithLogger(LOG, in);
-      Assert.assertEquals("hello, world!", new String(buf));
+      Assertions.assertEquals("hello, world!", new String(buf));
     } finally {
       test.shutdownCluster();
     }
@@ -740,7 +739,7 @@
         // since no more ARCHIVE space.
         final Path file0 = new Path(pathPolicyMap.cold, "file0");
         final Replication r = test.getReplication(file0);
-        Assert.assertEquals(0, r.disk);
+        Assertions.assertEquals(0, r.disk);
 
         final short newReplication = (short) 5;
         test.dfs.setReplication(file0, newReplication);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
index 646e800..9c3bd8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index 5b1f6e4..87ef3fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -22,7 +22,7 @@
 import static org.apache.hadoop.fs.permission.AclEntryScope.*;
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -50,12 +50,12 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -92,21 +92,21 @@
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     pathCount += 1;
     path = new Path("/p" + pathCount);
     initFileSystems();
   }
 
-  @After
+  @AfterEach
   public void destroyFileSystems() {
     IOUtils.cleanupWithLogger(null, fs, fsAsBruce, fsAsDiana,
         fsAsSupergroupMember);
@@ -123,15 +123,15 @@
       aclEntry(ACCESS, OTHER, NONE),
       aclEntry(DEFAULT, USER, "foo", ALL));
     fs.setAcl(path, aclSpec);
-    Assert.assertTrue(path + " should have ACLs in FileStatus!",
-        fs.getFileStatus(path).hasAcl());
+    Assertions.assertTrue(fs.getFileStatus(path).hasAcl(),
+        path + " should have ACLs in FileStatus!");
 
     aclSpec = Lists.newArrayList(
       aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
       aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
     fs.modifyAclEntries(path, aclSpec);
-    Assert.assertTrue(path + " should have ACLs in FileStatus!",
-        fs.getFileStatus(path).hasAcl());
+    Assertions.assertTrue(fs.getFileStatus(path).hasAcl(),
+        path + " should have ACLs in FileStatus!");
 
     AclStatus s = fs.getAclStatus(path);
     AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
@@ -572,15 +572,15 @@
       aclEntry(DEFAULT, USER, "foo", ALL));
 
     fs.setAcl(path, aclSpec);
-    Assert.assertTrue(path + " should have ACLs in FileStatus!",
-        fs.getFileStatus(path).hasAcl());
-    Assert.assertTrue(path + " should have ACLs in FileStatus#toString()!",
-        fs.getFileStatus(path).toString().contains("hasAcl=true"));
+    Assertions.assertTrue(fs.getFileStatus(path).hasAcl(),
+        path + " should have ACLs in FileStatus!");
+    Assertions.assertTrue(fs.getFileStatus(path).toString().contains("hasAcl=true"),
+        path + " should have ACLs in FileStatus#toString()!");
     fs.removeAcl(path);
-    Assert.assertFalse(path + " should not have ACLs in FileStatus!",
-        fs.getFileStatus(path).hasAcl());
-    Assert.assertTrue(path + " should not have ACLs in FileStatus#toString()!",
-        fs.getFileStatus(path).toString().contains("hasAcl=false"));
+    Assertions.assertFalse(fs.getFileStatus(path).hasAcl(),
+        path + " should not have ACLs in FileStatus!");
+    Assertions.assertTrue(fs.getFileStatus(path).toString().contains("hasAcl=false"),
+        path + " should not have ACLs in FileStatus#toString()!");
 
     AclStatus s = fs.getAclStatus(path);
     AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
@@ -992,13 +992,13 @@
     List<AclEntry> aclSpec = Lists.newArrayList(
       aclEntry(DEFAULT, USER, "foo", ALL));
     fs.setAcl(path, aclSpec);
-    Assert.assertTrue(path + " should have ACLs in FileStatus!",
-        fs.getFileStatus(path).hasAcl());
+    Assertions.assertTrue(fs.getFileStatus(path).hasAcl(),
+        path + " should have ACLs in FileStatus!");
 
     Path dirPath = new Path(path, "dir1");
     fs.mkdirs(dirPath);
-    Assert.assertTrue(dirPath + " should have ACLs in FileStatus!",
-        fs.getFileStatus(dirPath).hasAcl());
+    Assertions.assertTrue(fs.getFileStatus(dirPath).hasAcl(),
+        dirPath + " should have ACLs in FileStatus!");
 
     AclStatus s = fs.getAclStatus(dirPath);
     AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
@@ -1478,17 +1478,17 @@
     // give all access at first
     fs.setPermission(p1, FsPermission.valueOf("-rwxrwxrwx"));
     AclStatus aclStatus = fs.getAclStatus(p1);
-    assertEquals("Entries should be empty", 0, aclStatus.getEntries().size());
-    assertEquals("Permission should be carried by AclStatus",
-        fs.getFileStatus(p1).getPermission(), aclStatus.getPermission());
+    assertEquals(0, aclStatus.getEntries().size(), "Entries should be empty");
+    assertEquals(fs.getFileStatus(p1).getPermission(), aclStatus.getPermission(),
+        "Permission should be carried by AclStatus");
 
     // Add a named entries with all access
     fs.modifyAclEntries(p1, Lists.newArrayList(
         aclEntry(ACCESS, USER, "bruce", ALL),
         aclEntry(ACCESS, GROUP, "groupY", ALL)));
     aclStatus = fs.getAclStatus(p1);
-    assertEquals("Entries should contain owner group entry also", 3, aclStatus
-        .getEntries().size());
+    assertEquals(3, aclStatus.getEntries().size(),
+        "Entries should contain owner group entry also");
 
     // restrict the access
     fs.setPermission(p1, FsPermission.valueOf("-rwxr-----"));
@@ -1540,8 +1540,8 @@
           aclEntry(DEFAULT, GROUP, "testdeduplicategroup", ALL));
       fs.mkdirs(p1);
       fs.modifyAclEntries(p1, aclSpec);
-      assertEquals("One more ACL feature should be unique", currentSize + 1,
-          AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
+      assertEquals(currentSize + 1, AclStorage.getUniqueAclFeatures()
+          .getUniqueElementsSize(), "One more ACL feature should be unique");
       currentSize++;
     }
     Path child1 = new Path(p1, "child1");
@@ -1549,11 +1549,11 @@
     {
       // new child dir should copy entries from its parent.
       fs.mkdirs(child1);
-      assertEquals("One more ACL feature should be unique", currentSize + 1,
-          AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
+      assertEquals(currentSize + 1, AclStorage.getUniqueAclFeatures()
+          .getUniqueElementsSize(), "One more ACL feature should be unique");
       child1AclFeature = getAclFeature(child1, cluster);
-      assertEquals("Reference count should be 1", 1,
-          child1AclFeature.getRefCount());
+      assertEquals(1, child1AclFeature.getRefCount(),
+          "Reference count should be 1");
       currentSize++;
     }
     Path child2 = new Path(p1, "child2");
@@ -1561,13 +1561,13 @@
       // new child dir should copy entries from its parent. But all entries are
       // same as its sibling without any more acl changes.
       fs.mkdirs(child2);
-      assertEquals("existing AclFeature should be re-used", currentSize,
-          AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
+      assertEquals(currentSize, AclStorage.getUniqueAclFeatures()
+          .getUniqueElementsSize(), "existing AclFeature should be re-used");
       AclFeature child2AclFeature = getAclFeature(child1, cluster);
-      assertSame("Same Aclfeature should be re-used", child1AclFeature,
-          child2AclFeature);
-      assertEquals("Reference count should be 2", 2,
-          child2AclFeature.getRefCount());
+      assertSame(child1AclFeature, child2AclFeature,
+          "Same Aclfeature should be re-used");
+      assertEquals(2, child2AclFeature.getRefCount(),
+          "Reference count should be 2");
     }
     {
       // modification of ACL on should decrement the original reference count
@@ -1576,31 +1576,31 @@
           "user1", ALL));
       fs.modifyAclEntries(child1, aclSpec);
       AclFeature modifiedAclFeature = getAclFeature(child1, cluster);
-      assertEquals("Old Reference count should be 1", 1,
-          child1AclFeature.getRefCount());
-      assertEquals("New Reference count should be 1", 1,
-          modifiedAclFeature.getRefCount());
+      assertEquals(1, child1AclFeature.getRefCount(),
+          "Old Reference count should be 1");
+      assertEquals(1, modifiedAclFeature.getRefCount(),
+          "New Reference count should be 1");
 
       // removing the new added ACL entry should refer to old ACLfeature
       AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER)
           .setName("user1").build();
       fs.removeAclEntries(child1, Lists.newArrayList(aclEntry));
-      assertEquals("Old Reference count should be 2 again", 2,
-          child1AclFeature.getRefCount());
-      assertEquals("New Reference count should be 0", 0,
-          modifiedAclFeature.getRefCount());
+      assertEquals(2, child1AclFeature.getRefCount(),
+          "Old Reference count should be 2 again");
+      assertEquals(0, modifiedAclFeature.getRefCount(),
+          "New Reference count should be 0");
     }
     {
       // verify the reference count on deletion of Acls
       fs.removeAcl(child2);
-      assertEquals("Reference count should be 1", 1,
-          child1AclFeature.getRefCount());
+      assertEquals(1, child1AclFeature.getRefCount(),
+          "Reference count should be 1");
     }
     {
       // verify the reference count on deletion of dir with ACL
       fs.delete(child1, true);
-      assertEquals("Reference count should be 0", 0,
-          child1AclFeature.getRefCount());
+      assertEquals(0, child1AclFeature.getRefCount(),
+          "Reference count should be 0");
     }
 
     Path file1 = new Path(p1, "file1");
@@ -1610,11 +1610,11 @@
       // Using same reference on creation of file
       fs.create(file1).close();
       fileAclFeature = getAclFeature(file1, cluster);
-      assertEquals("Reference count should be 1", 1,
-          fileAclFeature.getRefCount());
+      assertEquals(1, fileAclFeature.getRefCount(),
+          "Reference count should be 1");
       fs.create(file2).close();
-      assertEquals("Reference count should be 2", 2,
-          fileAclFeature.getRefCount());
+      assertEquals(2, fileAclFeature.getRefCount(),
+          "Reference count should be 2");
     }
     {
       // modifying ACLs on file should decrease the reference count on old
@@ -1624,34 +1624,34 @@
       // adding new ACL entry
       fs.modifyAclEntries(file1, aclSpec);
       AclFeature modifiedFileAcl = getAclFeature(file1, cluster);
-      assertEquals("Old Reference count should be 1", 1,
-          fileAclFeature.getRefCount());
-      assertEquals("New Reference count should be 1", 1,
-          modifiedFileAcl.getRefCount());
+      assertEquals(1, fileAclFeature.getRefCount(),
+          "Old Reference count should be 1");
+      assertEquals(1, modifiedFileAcl.getRefCount(),
+          "New Reference count should be 1");
 
       // removing the new added ACL entry should refer to old ACLfeature
       AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER)
           .setName("user1").build();
       fs.removeAclEntries(file1, Lists.newArrayList(aclEntry));
-      assertEquals("Old Reference count should be 2", 2,
-          fileAclFeature.getRefCount());
-      assertEquals("New Reference count should be 0", 0,
-          modifiedFileAcl.getRefCount());
+      assertEquals(2, fileAclFeature.getRefCount(),
+          "Old Reference count should be 2");
+      assertEquals(0, modifiedFileAcl.getRefCount(),
+          "New Reference count should be 0");
     }
     {
       // reference count should be decreased on deletion of files with ACLs
       fs.delete(file2, true);
-      assertEquals("Reference count should be decreased on delete of the file",
-          1, fileAclFeature.getRefCount());
+      assertEquals(1, fileAclFeature.getRefCount(),
+          "Reference count should be decreased on delete of the file");
       fs.delete(file1, true);
-      assertEquals("Reference count should be decreased on delete of the file",
-          0, fileAclFeature.getRefCount());
+      assertEquals(0, fileAclFeature.getRefCount(),
+          "Reference count should be decreased on delete of the file");
 
       // On reference count reaches 0 instance should be removed from map
       fs.create(file1).close();
       AclFeature newFileAclFeature = getAclFeature(file1, cluster);
-      assertNotSame("Instance should be different on reference count 0",
-          fileAclFeature, newFileAclFeature);
+      assertNotSame(fileAclFeature, newFileAclFeature,
+          "Instance should be different on reference count 0");
       fileAclFeature = newFileAclFeature;
     }
     Map<AclFeature, Integer> restartRefCounter = new HashMap<>();
@@ -1670,12 +1670,12 @@
       cluster.restartNameNode(true);
       List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures()
           .getEntries();
-      assertEquals("Entries before and after should be same",
-          entriesBeforeRestart, entriesAfterRestart);
+      assertEquals(entriesBeforeRestart, entriesAfterRestart,
+          "Entries before and after should be same");
       for (AclFeature aclFeature : entriesAfterRestart) {
         int before = restartRefCounter.get(aclFeature);
-        assertEquals("ReferenceCount After Restart should be doubled",
-            before * 2, aclFeature.getRefCount());
+        assertEquals(before * 2, aclFeature.getRefCount(),
+            "ReferenceCount After Restart should be doubled");
       }
     }
     {
@@ -1688,12 +1688,12 @@
       cluster.restartNameNode(true);
       List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures()
           .getEntries();
-      assertEquals("Entries before and after should be same",
-          entriesBeforeRestart, entriesAfterRestart);
+      assertEquals(entriesBeforeRestart, entriesAfterRestart,
+          "Entries before and after should be same");
       for (AclFeature aclFeature : entriesAfterRestart) {
         int before = restartRefCounter.get(aclFeature);
-        assertEquals("ReferenceCount After 2 Restarts should be tripled",
-            before * 3, aclFeature.getRefCount());
+        assertEquals(before * 3, aclFeature.getRefCount(),
+            "ReferenceCount After 2 Restarts should be tripled");
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 7f4a0ce..dd63f5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -444,7 +442,7 @@
   public static Map<File, String> getFileMD5s(File... files) throws Exception {
     Map<File, String> ret = Maps.newHashMap();
     for (File f : files) {
-      assertTrue("Must exist: " + f, f.exists());
+      assertTrue(f.exists(), "Must exist: " + f);
       ret.put(f, getFileMD5(f));
     }
     return ret;
@@ -513,7 +511,7 @@
       for (long checkpointTxId : txids) {
         File image = new File(nameDir,
                               NNStorage.getImageFileName(checkpointTxId));
-        assertTrue("Expected non-empty " + image, image.length() > 0);
+        assertTrue(image.length() > 0, "Expected non-empty " + image);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index cc5133f..8cd6bf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -49,17 +49,14 @@
 import static org.apache.hadoop.fs.permission.FsAction.READ;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.*;
 
+import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 
 /**
@@ -98,7 +95,7 @@
   private static final UserGroupInformation DIANA =
       UserGroupInformation.createUserForTesting("diana", new String[] { });
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
@@ -108,14 +105,14 @@
     initCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() {
     if (dfsCluster != null) {
       dfsCluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     pathCount += 1;
     path = new Path("/p" + pathCount);
@@ -125,7 +122,7 @@
     initFileSystem();
   }
 
-  @After
+  @AfterEach
   public void destroyFileSystems() {
     IOUtils.cleanupWithLogger(null, fs);
     fs = null;
@@ -155,19 +152,19 @@
     fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
 
     Map<String, byte[]> xattrs = fs.getXAttrs(usePath);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
     
     fs.removeXAttr(usePath, name1);
     
     xattrs = fs.getXAttrs(usePath);
-    Assert.assertEquals(xattrs.size(), 0);
+    Assertions.assertEquals(xattrs.size(), 0);
     
     // Create xattr which already exists.
     fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
     try {
       fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
-      Assert.fail("Creating xattr which already exists should fail.");
+      Assertions.fail("Creating xattr which already exists should fail.");
     } catch (IOException e) {
     }
     fs.removeXAttr(usePath, name1);
@@ -178,31 +175,31 @@
           EnumSet.of(XAttrSetFlag.CREATE));
     }
     xattrs = fs.getXAttrs(usePath);
-    Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
+    Assertions.assertEquals(xattrs.size(), expectedXAttrs.size());
     for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
       final byte[] val =
           (ent.getValue() == null) ? new byte[0] : ent.getValue();
-      Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
+      Assertions.assertArrayEquals(val, xattrs.get(ent.getKey()));
     }
     
     restart(false);
     initFileSystem();
     xattrs = fs.getXAttrs(usePath);
-    Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
+    Assertions.assertEquals(xattrs.size(), expectedXAttrs.size());
     for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
       final byte[] val =
           (ent.getValue() == null) ? new byte[0] : ent.getValue();
-      Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
+      Assertions.assertArrayEquals(val, xattrs.get(ent.getKey()));
     }
     
     restart(true);
     initFileSystem();
     xattrs = fs.getXAttrs(usePath);
-    Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
+    Assertions.assertEquals(xattrs.size(), expectedXAttrs.size());
     for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
       final byte[] val =
           (ent.getValue() == null) ? new byte[0] : ent.getValue();
-      Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
+      Assertions.assertArrayEquals(val, xattrs.get(ent.getKey()));
     }
 
     fs.delete(usePath, false);
@@ -222,15 +219,15 @@
     fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
     
     Map<String, byte[]> xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(newValue1, xattrs.get(name1));
     
     fs.removeXAttr(path, name1);
     
     // Replace xattr which does not exist.
     try {
       fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
-      Assert.fail("Replacing xattr which does not exist should fail.");
+      Assertions.fail("Replacing xattr which does not exist should fail.");
     } catch (IOException e) {
     }
     
@@ -239,23 +236,23 @@
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
     fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name2));
     
     restart(false);
     initFileSystem();
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name2));
     
     restart(true);
     initFileSystem();
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name2));
     
     fs.removeXAttr(path, name1);
     fs.removeXAttr(path, name2);
@@ -277,15 +274,15 @@
         XAttrSetFlag.REPLACE));
         
     Map<String, byte[]> xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
     fs.removeXAttr(path, name1);
     
     // Set xattr with null name
     try {
       fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE, 
           XAttrSetFlag.REPLACE));
-      Assert.fail("Setting xattr with null name should fail.");
+      Assertions.fail("Setting xattr with null name should fail.");
     } catch (NullPointerException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
     } catch (RemoteException e) {
@@ -297,10 +294,10 @@
     try {
       fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, 
           XAttrSetFlag.REPLACE));
-      Assert.fail("Setting xattr with empty name should fail.");
+      Assertions.fail("Setting xattr with empty name should fail.");
     } catch (RemoteException e) {
-      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
-          HadoopIllegalArgumentException.class.getCanonicalName());
+      assertEquals(e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName(), "Unexpected RemoteException: " + e);
       GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
@@ -310,11 +307,11 @@
     try {
       fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE, 
           XAttrSetFlag.REPLACE));
-      Assert.fail("Setting xattr with invalid name prefix or without " +
+      Assertions.fail("Setting xattr with invalid name prefix or without " +
           "name prefix should fail.");
     } catch (RemoteException e) {
-      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
-          HadoopIllegalArgumentException.class.getCanonicalName());
+      assertEquals(e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName(), "Unexpected RemoteException: " + e);
       GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
@@ -323,8 +320,8 @@
     // Set xattr without XAttrSetFlag
     fs.setXAttr(path, name1, value1);
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
     fs.removeXAttr(path, name1);
     
     // XAttr exists, and replace it using CREATE|REPLACE flag.
@@ -333,8 +330,8 @@
         XAttrSetFlag.REPLACE));
     
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(newValue1, xattrs.get(name1));
     
     fs.removeXAttr(path, name1);
     
@@ -344,7 +341,7 @@
     fs.setXAttr(path, name3, null);
     try {
       fs.setXAttr(path, name4, null);
-      Assert.fail("Setting xattr should fail if total number of xattrs " +
+      Assertions.fail("Setting xattr should fail if total number of xattrs " +
           "for inode exceeds max limit.");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e);
@@ -357,7 +354,7 @@
     String longName = "user.0123456789abcdefX0123456789abcdefX0123456789abcdef";
     try {
       fs.setXAttr(path, longName, null);
-      Assert.fail("Setting xattr should fail if name is too long.");
+      Assertions.fail("Setting xattr should fail if name is too long.");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("XAttr is too big", e);
       GenericTestUtils.assertExceptionContains("total size is 50", e);
@@ -367,7 +364,7 @@
     byte[] longValue = new byte[MAX_SIZE];
     try {
       fs.setXAttr(path, "user.a", longValue);
-      Assert.fail("Setting xattr should fail if value is too long.");
+      Assertions.fail("Setting xattr should fail if value is too long.");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("XAttr is too big", e);
       GenericTestUtils.assertExceptionContains("total size is 38", e);
@@ -400,12 +397,12 @@
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
 
     final byte[] theValue = fs.getXAttr(path, "USER.a2");
-    Assert.assertArrayEquals(value2, theValue);
+    Assertions.assertArrayEquals(value2, theValue);
 
     /* An XAttr that was requested does not exist. */
     try {
       final byte[] value = fs.getXAttr(path, name3);
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           "At least one of the attributes provided was not found.", e);
@@ -419,7 +416,7 @@
       names.add(name3);
       try {
         final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
-        Assert.fail("expected IOException");
+        Assertions.fail("expected IOException");
       } catch (IOException e) {
         GenericTestUtils.assertExceptionContains(
             "At least one of the attributes provided was not found.", e);
@@ -432,7 +429,7 @@
     /* Unknown namespace should throw an exception. */
     try {
       final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (Exception e) {
       GenericTestUtils.assertExceptionContains
           ("An XAttr name must be prefixed with " +
@@ -457,7 +454,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
     }
@@ -480,7 +477,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -501,7 +498,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -517,7 +514,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -537,7 +534,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -574,20 +571,20 @@
     fs.removeXAttr(path, name2);
     
     Map<String, byte[]> xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name3));
     
     restart(false);
     initFileSystem();
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name3));
     
     restart(true);
     initFileSystem();
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name3));
     
     fs.removeXAttr(path, name3);
   }
@@ -616,7 +613,7 @@
     try {
       fs.removeXAttr(path, name2);
       fs.removeXAttr(path, name2);
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("No matching attributes found", e);
     }
@@ -626,10 +623,10 @@
         "with user/trusted/security/system/raw, followed by a '.'";
     try {
       fs.removeXAttr(path, "wackynamespace.foo");
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (RemoteException e) {
-      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
-          HadoopIllegalArgumentException.class.getCanonicalName());
+      assertEquals(e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName(), "Unexpected RemoteException: " + e);
       GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
@@ -651,7 +648,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
     } finally {
@@ -674,7 +671,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -695,7 +692,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -711,7 +708,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -731,7 +728,7 @@
             return null;
           }
         });
-      Assert.fail("expected IOException");
+      Assertions.fail("expected IOException");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
@@ -760,9 +757,9 @@
     Path renamePath = new Path(path.toString() + "-rename");
     fs.rename(path, renamePath);
     Map<String, byte[]> xattrs = fs.getXAttrs(renamePath);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
     fs.removeXAttr(renamePath, name1);
     fs.removeXAttr(renamePath, name2);
   }
@@ -794,7 +791,7 @@
 
     /* listXAttrs on a path with no XAttrs.*/
     final List<String> noXAttrs = fs.listXAttrs(path);
-    assertTrue("XAttrs were found?", noXAttrs.size() == 0);
+    assertTrue(noXAttrs.size() == 0, "XAttrs were found?");
 
     fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
@@ -919,9 +916,9 @@
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
     
     Map<String, byte[]> xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
   }
 
   @Test(timeout = 120000)
@@ -935,7 +932,7 @@
     Map<String, byte[]> xattrs;
     try {
       xattrs = fsAsDiana.getXAttrs(path);
-      Assert.fail("Diana should not have read access to get xattrs");
+      Assertions.fail("Diana should not have read access to get xattrs");
     } catch (AccessControlException e) {
       // Ignore
     }
@@ -944,18 +941,18 @@
     fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
         aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
     xattrs = fsAsDiana.getXAttrs(path);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
 
     try {
       fsAsDiana.removeXAttr(path, name1);
-      Assert.fail("Diana should not have write access to remove xattrs");
+      Assertions.fail("Diana should not have write access to remove xattrs");
     } catch (AccessControlException e) {
       // Ignore
     }
 
     try {
       fsAsDiana.setXAttr(path, name2, value2);
-      Assert.fail("Diana should not have write access to set xattrs");
+      Assertions.fail("Diana should not have write access to set xattrs");
     } catch (AccessControlException e) {
       // Ignore
     }
@@ -963,7 +960,7 @@
     fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
         aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
     fsAsDiana.setXAttr(path, name2, value2);
-    Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
+    Assertions.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
     fsAsDiana.removeXAttr(path, name1);
     fsAsDiana.removeXAttr(path, name2);
   }
@@ -980,14 +977,14 @@
     {
       // getXAttr
       final byte[] value = fs.getXAttr(rawPath, raw1);
-      Assert.assertArrayEquals(value, value1);
+      Assertions.assertArrayEquals(value, value1);
     }
 
     {
       // getXAttrs
       final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
-      Assert.assertEquals(xattrs.size(), 1);
-      Assert.assertArrayEquals(value1, xattrs.get(raw1));
+      Assertions.assertEquals(xattrs.size(), 1);
+      Assertions.assertArrayEquals(value1, xattrs.get(raw1));
       fs.removeXAttr(rawPath, raw1);
     }
 
@@ -998,8 +995,8 @@
           XAttrSetFlag.REPLACE));
 
       final Map<String,byte[]> xattrs = fs.getXAttrs(rawPath);
-      Assert.assertEquals(xattrs.size(), 1);
-      Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
+      Assertions.assertEquals(xattrs.size(), 1);
+      Assertions.assertArrayEquals(newValue1, xattrs.get(raw1));
 
       fs.removeXAttr(rawPath, raw1);
     }
@@ -1277,7 +1274,7 @@
     // Test that the xattr can't be deleted by anyone.
     try {
       userFs.removeXAttr(filePath, security1);
-      Assert.fail("Removing security xattr should fail.");
+      Assertions.fail("Removing security xattr should fail.");
     } catch (AccessControlException e) {
       GenericTestUtils.assertExceptionContains("The xattr '" +
           SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.", e);
@@ -1312,10 +1309,10 @@
   private void verifySecurityXAttrExists(FileSystem userFs) throws Exception {
     try {
       final Map<String, byte[]> xattrs = userFs.getXAttrs(filePath);
-      Assert.assertEquals(1, xattrs.size());
-      Assert.assertNotNull(xattrs.get(security1));
-      Assert.assertArrayEquals("expected empty byte[] from getXAttr",
-          new byte[0], userFs.getXAttr(filePath, security1));
+      Assertions.assertEquals(1, xattrs.size());
+      Assertions.assertNotNull(xattrs.get(security1));
+      Assertions.assertArrayEquals(
+          new byte[0], userFs.getXAttr(filePath, security1), "expected empty byte[] from getXAttr");
 
     } catch (AccessControlException e) {
       fail("getXAttrs failed but expected it to succeed");
@@ -1327,9 +1324,9 @@
     // Test that a file with the xattr can or can't be opened.
     try {
       userFs.open(filePath).read();
-      assertFalse("open succeeded but expected it to fail", expectOpenFailure);
+      assertFalse(expectOpenFailure, "open succeeded but expected it to fail");
     } catch (AccessControlException e) {
-      assertTrue("open failed but expected it to succeed", expectOpenFailure);
+      assertTrue(expectOpenFailure, "open failed but expected it to succeed");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java
index fb46751..f2d6dad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java
@@ -30,9 +30,9 @@
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 /**
@@ -49,7 +49,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
index 91c1493..0602b4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
@@ -22,7 +22,7 @@
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTransformation.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.util.List;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
index 9e9890f1..18972ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -33,9 +33,9 @@
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test AddBlockOp is written and read correctly
@@ -47,7 +47,7 @@
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -56,7 +56,7 @@
     cluster.waitActive();
   }
   
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 088a47e..a85eb69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -19,9 +19,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
+
 import java.io.IOException;
 import java.util.EnumSet;
 import org.slf4j.Logger;
@@ -36,9 +35,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 /**
@@ -54,7 +53,7 @@
   private Configuration conf;
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -63,7 +62,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -103,18 +102,18 @@
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);
-    assertNotNull("Targets must be generated", targets);
+    assertNotNull(targets, "Targets must be generated");
 
     // run second addBlock()
     LOG.info("Starting second addBlock for " + src);
     nn.addBlock(src, "clientName", null, null,
                 HdfsConstants.GRANDFATHER_INODE_ID, null, null);
-    assertTrue("Penultimate block must be complete",
-               checkFileProgress(src, false));
+    assertTrue(checkFileProgress(src, false),
+        "Penultimate block must be complete");
     LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
-    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
+    assertEquals(1, lbs.getLocatedBlocks().size(), "Must be one block");
     LocatedBlock lb2 = lbs.get(0);
-    assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
+    assertEquals(REPLICATION, lb2.getLocations().length, "Wrong replication");
 
     // continue first addBlock()
     ns.writeLock();
@@ -125,14 +124,14 @@
     } finally {
       ns.writeUnlock();
     }
-    assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
+    assertEquals(lb2.getBlock(), newBlock.getBlock(), "Blocks are not equal");
 
     // check locations
     lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
-    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
+    assertEquals(1, lbs.getLocatedBlocks().size(), "Must be one block");
     LocatedBlock lb1 = lbs.get(0);
-    assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
-    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
+    assertEquals(REPLICATION, lb1.getLocations().length, "Wrong replication");
+    assertEquals(lb1.getBlock(), lb2.getBlock(), "Blocks are not equal");
   }
 
   boolean checkFileProgress(String src, boolean checkall) throws IOException {
@@ -163,14 +162,14 @@
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
         HdfsConstants.GRANDFATHER_INODE_ID, null, null);
-    assertTrue("Block locations should be present",
-        lb1.getLocations().length > 0);
+    assertTrue(lb1.getLocations().length > 0,
+        "Block locations should be present");
 
     cluster.restartNameNode();
     nameNodeRpc = cluster.getNameNodeRpc();
     LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
         HdfsConstants.GRANDFATHER_INODE_ID, null, null);
-    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
-    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
+    assertEquals(lb1.getBlock(), lb2.getBlock(), "Blocks are not equal");
+    assertTrue(lb2.getLocations().length > 0, "Wrong locations with retry");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index aad8e9b..b4f9deb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -33,12 +33,12 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
@@ -46,8 +46,8 @@
 import java.util.BitSet;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestAddOverReplicatedStripedBlocks {
 
@@ -68,7 +68,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -86,7 +86,7 @@
         ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -236,7 +236,7 @@
     for (byte index : bg.getBlockIndices()) {
       set.set(index);
     }
-    Assert.assertFalse(set.get(0));
+    Assertions.assertFalse(set.get(0));
     for (int i = 1; i < groupSize; i++) {
       assertTrue(set.get(i));
     }
@@ -244,7 +244,7 @@
 
   // This test is going to be rewritten in HDFS-10854. Ignoring this test
   // temporarily as it fails with the fix for HDFS-10301.
-  @Ignore
+  @Disabled
   @Test
   public void testProcessOverReplicatedAndMissingStripedBlock()
       throws Exception {
@@ -293,7 +293,7 @@
     for (byte index : bg.getBlockIndices()) {
       set.set(index);
     }
-    Assert.assertFalse(set.get(groupSize - 1));
+    Assertions.assertFalse(set.get(groupSize - 1));
     for (int i = 0; i < groupSize - 1; i++) {
       assertTrue(set.get(i));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
index c03adf4..69568cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
@@ -33,10 +33,10 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
@@ -58,7 +58,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
@@ -68,7 +68,7 @@
         StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index aec6811..b5aaa15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -51,9 +51,9 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -63,10 +63,7 @@
 import java.util.UUID;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestAddStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
@@ -83,7 +80,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
@@ -93,7 +90,7 @@
     dfs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -485,17 +482,17 @@
       out.write("this is a replicated file".getBytes());
     }
     BlockLocation[] locations = dfs.getFileBlockLocations(replicated, 0, 100);
-    assertEquals("There should be exactly one Block present",
-        1, locations.length);
-    assertFalse("The file is Striped", locations[0].isStriped());
+    assertEquals(1, locations.length,
+        "There should be exactly one Block present");
+    assertFalse(locations[0].isStriped(), "The file is Striped");
 
     Path striped = new Path("/blockLocation/striped");
     try (FSDataOutputStream out = dfs.createFile(striped).recursive().build()) {
       out.write("this is a striped file".getBytes());
     }
     locations = dfs.getFileBlockLocations(striped, 0, 100);
-    assertEquals("There should be exactly one Block present",
-        1, locations.length);
-    assertTrue("The file is not Striped", locations[0].isStriped());
+    assertEquals(1, locations.length,
+        "There should be exactly one Block present");
+    assertTrue(locations[0].isStriped(), "The file is not Striped");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
index da4e71e..f074268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
@@ -21,9 +21,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -42,9 +40,9 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Startup and format tests
@@ -59,7 +57,7 @@
   private static Configuration config;
   private static MiniDFSCluster cluster = null;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     config = new Configuration();
     if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
@@ -90,7 +88,7 @@
   /**
    * clean up
    */
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster!=null) {
       cluster.shutdown();
@@ -135,10 +133,10 @@
       NameNode.format(config);
       fail("Format succeeded, when it should have failed");
     } catch (IOException e) { // expected to fail
-      // Verify we got message we expected
-      assertTrue("Exception was not about formatting Namenode", 
-          e.getMessage().startsWith("The option " + 
-                                    DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
+      // Verify we got message we expected
+      assertTrue(e.getMessage().startsWith(
+          "The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY),
+          "Exception was not about formatting Namenode");
       LOG.info("Expected failure: " + StringUtils.stringifyException(e));
       LOG.info("Done verifying format will fail with allowformat false");
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
index c86b04c..abafba5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.event.Level;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index aa2c7f6..7c6e73f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -40,9 +40,8 @@
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.Before;
 import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import org.slf4j.Logger;
@@ -70,11 +69,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.NNTOP_ENABLED_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doThrow;
 
@@ -90,7 +85,7 @@
 
   private static final short TEST_PERMISSION = (short) 0654;
 
-  @Before
+  @BeforeEach
   public void setup() {
     DummyAuditLogger.initialized = false;
     DummyAuditLogger.logCount = 0;
@@ -138,9 +133,9 @@
       List<AuditLogger> auditLoggers =
           cluster.getNameNode().getNamesystem().getAuditLoggers();
       for (AuditLogger auditLogger : auditLoggers) {
-        assertFalse(
-            "top audit logger is still hooked in after it is disabled",
-            auditLogger instanceof TopAuditLogger);
+        assertFalse(
+            auditLogger instanceof TopAuditLogger,
+            "top audit logger is still hooked in after it is disabled");
       }
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index 4d379b1..3d46ac1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -45,14 +45,17 @@
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
-import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import org.junit.Before;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
+
 import org.mockito.Mockito;
 
 import static org.mockito.ArgumentMatchers.any;
@@ -75,7 +78,7 @@
   static UserGroupInformation user2;
   private static NamenodeProtocols proto;
 
-  @Before
+  @BeforeEach
   public void initialize() throws Exception {
     // start a cluster
     conf = new HdfsConfiguration();
@@ -100,7 +103,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     Server.getCurCall().set(null);
     fs.close();
@@ -130,8 +133,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log from getContentSummary",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log from getContentSummary");
   }
 
   @Test
@@ -187,8 +190,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log from Concat",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log from Concat");
   }
 
   @Test
@@ -225,8 +228,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -252,8 +255,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length+1 == auditlog.getOutput().split("\n").length);
+    assertTrue(length + 1 == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -333,8 +336,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -399,8 +402,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -428,8 +431,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -451,8 +454,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -475,8 +478,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -500,8 +503,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -519,16 +522,16 @@
     String aceRemoveCachePoolPattern =
         ".*allowed=false.*ugi=theDoctor.*cmd=removeCachePool.*";
     int length = verifyAuditLogs(aceRemoveCachePoolPattern);
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
     try {
       fileSys.close();
       ((DistributedFileSystem) fileSys).removeCachePool("pool1");
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -550,8 +553,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e){
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -573,8 +576,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -596,8 +599,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException e) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
   }
 
   @Test
@@ -638,8 +641,8 @@
       fail("The operation should have failed with IOException");
     } catch (IOException ace) {
     }
-    assertTrue("Unexpected log!",
-        length == auditlog.getOutput().split("\n").length);
+    assertTrue(length == auditlog.getOutput().split("\n").length,
+        "Unexpected log!");
     cluster.getNamesystem().setFSDirectory(dir);
   }
 
@@ -1226,8 +1229,8 @@
       fail(
           "RestoreFailedStorage should have thrown AccessControlException!");
     } catch (IOException ace) {
-      assertEquals("Unexpected Exception!",
-          ace.getClass(), AccessControlException.class);
+      assertEquals(ace.getClass(), AccessControlException.class,
+          "Unexpected Exception!");
       String auditLogString =
           ".*allowed=false.*cmd=" + operationName + ".*";
       verifyAuditLogs(auditLogString);
@@ -1280,7 +1283,7 @@
     int length = auditlog.getOutput().split(System.lineSeparator()).length;
     String lastAudit = auditlog.getOutput()
         .split(System.lineSeparator())[length - 1];
-    assertTrue("Unexpected log!", lastAudit.matches(pattern));
+    assertTrue(lastAudit.matches(pattern), "Unexpected log!");
     return length;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 9fe7404..cdbda6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -56,9 +56,9 @@
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -112,7 +112,7 @@
   Configuration conf;
   UserGroupInformation userGroupInfo;
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     // must configure prior to instantiating the namesystem because it
     // will reconfigure the logger if async is enabled
@@ -141,7 +141,7 @@
     userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
  }
 
-  @After
+  @AfterEach
   public void teardownCluster() throws Exception {
     util.cleanup(fs, "/srcdat");
     if (fs != null) {
@@ -165,7 +165,7 @@
     int val = istream.read();
     istream.close();
     verifyAuditLogs(true);
-    assertTrue("failed to read from file", val >= 0);
+    assertTrue(val >= 0, "failed to read from file");
   }
 
   /** test that allowed stat puts proper entry in audit log */
@@ -177,7 +177,7 @@
     setupAuditLogs();
     FileStatus st = userfs.getFileStatus(file);
     verifyAuditLogs(true);
-    assertTrue("failed to stat file", st != null && st.isFile());
+    assertTrue(st != null && st.isFile(), "failed to stat file");
   }
 
   /** test that denied operation puts proper entry in audit log */
@@ -216,7 +216,7 @@
     istream.close();
 
     verifyAuditLogsRepeat(true, 3);
-    assertTrue("failed to read from file", val >= 0);
+    assertTrue(val >= 0, "failed to read from file");
   }
 
   /** test that stat via webhdfs puts proper entry in audit log */
@@ -233,7 +233,7 @@
     FileStatus st = webfs.getFileStatus(file);
 
     verifyAuditLogs(true);
-    assertTrue("failed to stat file", st != null && st.isFile());
+    assertTrue(st != null && st.isFile(), "failed to stat file");
   }
 
   /** test that denied access via webhdfs puts proper entry in audit log */
@@ -334,12 +334,12 @@
       for (int i = 0; i < ndupe; i++) {
         line = reader.readLine();
         assertNotNull(line);
-        assertTrue("Expected audit event not found in audit log",
-            auditPattern.matcher(line).matches());
+        assertTrue(auditPattern.matcher(line).matches(),
+            "Expected audit event not found in audit log");
         ret &= successPattern.matcher(line).matches();
       }
-      assertNull("Unexpected event in audit log", reader.readLine());
-      assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+      assertNull(reader.readLine(), "Unexpected event in audit log");
+      assertTrue(ret == expectSuccess, "Expected success=" + expectSuccess);
     } finally {
       reader.close();
     }
@@ -371,9 +371,9 @@
           patternMatches |= pattern.matcher(line).matches();
           ret &= successPattern.matcher(line).matches();
         }
-        assertNull("Unexpected event in audit log", reader.readLine());
-        assertTrue("Expected audit event not found in audit log", patternMatches);
-        assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+        assertNull(reader.readLine(), "Unexpected event in audit log");
+        assertTrue(patternMatches, "Expected audit event not found in audit log");
+        assertTrue(ret == expectSuccess, "Expected success=" + expectSuccess);
       } finally {
         reader.close();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java
index 1f52cf3..f3c8af6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuthorizationContext.java
@@ -19,12 +19,12 @@
 
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
-import static junit.framework.TestCase.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
@@ -45,7 +45,7 @@
   private String path = "";
   private int ancestorIndex = inodes.length - 2;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     when(iip.getPathSnapshotId()).thenReturn(snapshotId);
     when(iip.getINodesArray()).thenReturn(inodes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 8481b57..babe209 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -17,11 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -55,9 +51,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
 import org.slf4j.event.Level;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 
@@ -77,7 +72,7 @@
   static final int blockSize = 4096;
   static final int fileSize = 8192;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     File baseDir = new File(BASE_DIR);
     if(baseDir.exists())
@@ -110,10 +105,10 @@
 
     BackupNode bn = (BackupNode)NameNode.createNameNode(
         new String[]{startupOpt.getName()}, c);
-    assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
-    assertTrue(bn.getRole() + " must be in StandbyState",
-               bn.getNamesystem().getHAState()
-                 .equalsIgnoreCase(HAServiceState.STANDBY.name()));
+    assertTrue(bn.isInSafeMode(), bn.getRole() + " must be in SafeMode.");
+    assertTrue(bn.getNamesystem().getHAState()
+            .equalsIgnoreCase(HAServiceState.STANDBY.name()),
+        bn.getRole() + " must be in StandbyState");
     return bn;
   }
 
@@ -182,8 +177,8 @@
     try {
       bn = (BackupNode)NameNode.createNameNode(
           new String[] {startupOpt.getName()}, c);
-      assertTrue("Namesystem in BackupNode should be null",
-          bn.getNamesystem() == null);
+      assertTrue(bn.getNamesystem() == null,
+          "Namesystem in BackupNode should be null");
       fail("Incorrect authentication setting should throw IOException");
     } catch (IOException e) {
       LOG.info("IOException thrown.", e);
@@ -247,10 +242,10 @@
       // NN should have received a new image
       long nnImageAfter =
         nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
-      
-      assertTrue("nn should have received new checkpoint. before: " +
-          nnImageBefore + " after: " + nnImageAfter,
-          nnImageAfter > nnImageBefore);
+
+      assertTrue(nnImageAfter > nnImageBefore,
+          "nn should have received new checkpoint. before: " +
+          nnImageBefore + " after: " + nnImageAfter);
 
       // BN should stay in sync after checkpoint
       testBNInSync(cluster, backup, 3);
@@ -265,8 +260,8 @@
       EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
       assertEquals(editsLog.getFirstTxId(),
           nn.getFSImage().getEditLog().getCurSegmentTxId());
-      assertTrue("Should not have finalized " + editsLog,
-          editsLog.isInProgress());
+      assertTrue(editsLog.isInProgress(),
+          "Should not have finalized " + editsLog);
       
       // do some edits
       assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
@@ -388,7 +383,7 @@
       waitCheckpointDone(cluster, txid);
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);
-      assertTrue(e.getLocalizedMessage(), false);
+      assertTrue(false, e.getLocalizedMessage());
     } finally {
       if(backup != null) backup.stop();
       if(fileSys != null) fileSys.close();
@@ -447,7 +442,7 @@
         LOG.info("Write to " + backup.getRole() + " failed as expected: ", eio);
         canWrite = false;
       }
-      assertFalse("Write to BackupNode must be prohibited.", canWrite);
+      assertFalse(canWrite, "Write to BackupNode must be prohibited.");
 
       // Reads are allowed for BackupNode, but not for CheckpointNode
       boolean canRead = true;
@@ -457,18 +452,18 @@
         LOG.info("Read from " + backup.getRole() + " failed: ", eio);
         canRead = false;
       }
-      assertEquals("Reads to BackupNode are allowed, but not CheckpointNode.",
-          canRead, backup.isRole(NamenodeRole.BACKUP));
+      assertEquals(canRead, backup.isRole(NamenodeRole.BACKUP),
+          "Reads to BackupNode are allowed, but not CheckpointNode.");
 
       DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize,
           replication, seed);
       
       TestCheckpoint.checkFile(fileSys, file3, replication);
-      // should also be on BN right away
-      assertTrue("file3 does not exist on BackupNode",
-          op != StartupOption.BACKUP ||
-          backup.getNamesystem().getFileInfo(
-              file3.toUri().getPath(), false, false, false) != null);
+      // should also be on BN right away
+      assertTrue(op != StartupOption.BACKUP ||
+          backup.getNamesystem().getFileInfo(
+              file3.toUri().getPath(), false, false, false) != null,
+          "file3 does not exist on BackupNode");
 
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);
@@ -496,7 +491,7 @@
       assertTrue(fileSys.exists(file2));
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode: ", e);
-      assertTrue(e.getLocalizedMessage(), false);
+      assertTrue(false, e.getLocalizedMessage());
     } finally {
       fileSys.close();
       if (cluster != null) {
@@ -546,11 +541,11 @@
           new Path("hdfs://" + bnAddr).toUri(), conf);
       String nnData = DFSTestUtil.readFile(fileSys, file1);
       String bnData = DFSTestUtil.readFile(bnFS, file1);
-      assertEquals("Data read from BackupNode and NameNode is not the same.",
-          nnData, bnData);
+      assertEquals(nnData, bnData,
+          "Data read from BackupNode and NameNode is not the same.");
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode: ", e);
-      assertTrue(e.getLocalizedMessage(), false);
+      assertTrue(false, e.getLocalizedMessage());
     } finally {
       if(fileSys != null) fileSys.close();
       if(backup != null) backup.stop();
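
The recurring change in the hunks above (and in the files that follow) is mechanical: org.junit.Assert puts the failure message in the first parameter, while org.junit.jupiter.api.Assertions expects it as the last parameter. A minimal self-contained sketch of that pattern, using a hypothetical test class that is not part of this patch:

    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    public class MessageOrderExample {
      @Test
      public void messageMovesToLastArgument() {
        boolean canWrite = false;
        // JUnit 4 form was: assertFalse("Write must be prohibited.", canWrite);
        assertFalse(canWrite, "Write must be prohibited.");
        // Same rule applies to assertTrue, assertEquals, assertNull, etc.
        assertTrue(!canWrite, "canWrite should have stayed false");
      }
    }
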
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
index 88b7d2b..18841e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
@@ -31,15 +31,15 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.StaticMapping;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.*;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestBlockPlacementPolicyRackFaultTolerant {
 
@@ -49,7 +49,7 @@
   private FSNamesystem namesystem = null;
   private PermissionStatus perm = null;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     StaticMapping.resetMap();
     Configuration conf = new HdfsConfiguration();
@@ -78,7 +78,7 @@
         FsPermission.getDefault());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
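
The annotation renames in this file follow the standard JUnit 4 to 5 mapping: @Before becomes @BeforeEach and @After becomes @AfterEach, with unchanged per-test semantics. A small hypothetical sketch of that lifecycle (assumed names, not taken from this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    public class LifecycleExample {
      private StringBuilder scratch;

      @BeforeEach
      public void setup() {
        scratch = new StringBuilder();   // runs before every @Test method
      }

      @AfterEach
      public void teardown() {
        scratch = null;                  // runs after every @Test method
      }

      @Test
      public void scratchStartsEmpty() {
        assertEquals(0, scratch.length(), "setup() should provide a fresh buffer");
      }
    }
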
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index 49e79da..f3b14d0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -41,10 +39,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 
@@ -56,7 +53,7 @@
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -64,7 +61,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if(hdfs != null) hdfs.close();
     if(cluster != null) cluster.shutdown();
@@ -92,49 +89,49 @@
                                 boolean isFileOpen) throws IOException {
     FSNamesystem ns = cluster.getNamesystem();
     final INodeFile inode = INodeFile.valueOf(ns.dir.getINode(file), file);
-    assertTrue("File " + inode.toString() +
-        " isUnderConstruction = " + inode.isUnderConstruction() +
-        " expected to be " + isFileOpen,
-        inode.isUnderConstruction() == isFileOpen);
+    assertTrue(inode.isUnderConstruction() == isFileOpen,
+        "File " + inode.toString() +
+        " isUnderConstruction = " + inode.isUnderConstruction() +
+        " expected to be " + isFileOpen);
     BlockInfo[] blocks = inode.getBlocks();
-    assertTrue("File does not have blocks: " + inode.toString(),
-        blocks != null && blocks.length > 0);
+    assertTrue(blocks != null && blocks.length > 0,
+        "File does not have blocks: " + inode.toString());
     
     int idx = 0;
     BlockInfo curBlock;
     // all blocks but the last two should be regular blocks
     for(; idx < blocks.length - 2; idx++) {
       curBlock = blocks[idx];
-      assertTrue("Block is not complete: " + curBlock,
-          curBlock.isComplete());
-      assertTrue("Block is not in BlocksMap: " + curBlock,
-          ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
+      assertTrue(curBlock.isComplete(),
+          "Block is not complete: " + curBlock);
+      assertTrue(ns.getBlockManager().getStoredBlock(curBlock) == curBlock,
+          "Block is not in BlocksMap: " + curBlock);
     }
 
     // the penultimate block is either complete or
     // committed if the file is not closed
     if(idx > 0) {
       curBlock = blocks[idx-1]; // penultimate block
-      assertTrue("Block " + curBlock +
-          " isUnderConstruction = " + inode.isUnderConstruction() +
-          " expected to be " + isFileOpen,
-          (isFileOpen && curBlock.isComplete()) ||
-          (!isFileOpen && !curBlock.isComplete() == 
-            (curBlock.getBlockUCState() ==
-              BlockUCState.COMMITTED)));
-      assertTrue("Block is not in BlocksMap: " + curBlock,
-          ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
+      assertTrue((isFileOpen && curBlock.isComplete()) ||
+          (!isFileOpen && !curBlock.isComplete() ==
+              (curBlock.getBlockUCState() ==
+                  BlockUCState.COMMITTED)),
+          "Block " + curBlock +
+          " isUnderConstruction = " + inode.isUnderConstruction() +
+          " expected to be " + isFileOpen);
+      assertTrue(ns.getBlockManager().getStoredBlock(curBlock) == curBlock,
+          "Block is not in BlocksMap: " + curBlock);
     }
 
     // The last block is complete if the file is closed.
     // If the file is open, the last block may be complete or not. 
     curBlock = blocks[idx]; // last block
     if (!isFileOpen) {
-      assertTrue("Block " + curBlock + ", isFileOpen = " + isFileOpen,
-          curBlock.isComplete());
+      assertTrue(curBlock.isComplete(),
+          "Block " + curBlock + ", isFileOpen = " + isFileOpen);
     }
-    assertTrue("Block is not in BlocksMap: " + curBlock,
-        ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
+    assertTrue(ns.getBlockManager().getStoredBlock(curBlock) == curBlock,
+        "Block is not in BlocksMap: " + curBlock);
   }
 
   @Test
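
TestBlockUnderConstruction uses the class-level variant: @BeforeClass/@AfterClass map to @BeforeAll/@AfterAll, and with the default per-method test lifecycle those methods must remain static, as they do here. A hypothetical sketch, not taken from this patch:

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    public class ClassLifecycleExample {
      private static Path tempDir;

      @BeforeAll
      public static void setUp() throws Exception {
        tempDir = Files.createTempDirectory("once-per-class");  // shared by all tests
      }

      @AfterAll
      public static void tearDown() throws Exception {
        Files.deleteIfExists(tempDir);
      }

      @Test
      public void tempDirIsAvailable() {
        assertTrue(Files.exists(tempDir), "tempDir should be created once in @BeforeAll");
      }
    }
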
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 9a52548..1f4e960 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -25,12 +25,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.protocol.CachePoolInfo.RELATIVE_EXPIRY_NEVER;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -90,10 +85,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.GSet;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import java.util.function.Supplier;
@@ -142,7 +137,7 @@
     return this.conf;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = createCachingConf();
     cluster =
@@ -163,7 +158,7 @@
     return (DistributedFileSystem) FileSystem.get(conf);
   }
 
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     // Remove cache directives left behind by tests so that we release mmaps.
     RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(null);
@@ -378,7 +373,7 @@
 
     dfs.removeCachePool(poolName);
     iter = dfs.listCachePools();
-    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
+    assertFalse(iter.hasNext(), "expected no cache pools after deleting pool");
 
     proto.listCachePools(null);
 
@@ -400,18 +395,18 @@
     }
 
     iter = dfs.listCachePools();
-    assertFalse("expected no cache pools after deleting pool", iter.hasNext());
+    assertFalse(iter.hasNext(), "expected no cache pools after deleting pool");
   }
 
   private static void validateListAll(
       RemoteIterator<CacheDirectiveEntry> iter,
       Long... ids) throws Exception {
     for (Long id: ids) {
-      assertTrue("Unexpectedly few elements", iter.hasNext());
-      assertEquals("Unexpected directive ID", id,
-          iter.next().getInfo().getId());
+      assertTrue(iter.hasNext(), "Unexpectedly few elements");
+      assertEquals(id, iter.next().getInfo().getId(),
+          "Unexpected directive ID");
     }
-    assertFalse("Unexpectedly many list elements", iter.hasNext());
+    assertFalse(iter.hasNext(), "Unexpectedly many list elements");
   }
 
   private static long addAsUnprivileged(
@@ -456,9 +451,9 @@
 
     long alphaId = addAsUnprivileged(alpha);
     long alphaId2 = addAsUnprivileged(alpha);
-    assertFalse("Expected to get unique directives when re-adding an "
-        + "existing CacheDirectiveInfo",
-        alphaId == alphaId2);
+    assertFalse(alphaId == alphaId2,
+        "Expected to get unique directives when re-adding an "
+        + "existing CacheDirectiveInfo");
     long betaId = addAsUnprivileged(beta);
 
     try {
@@ -650,14 +645,14 @@
           .setLimit(limit)
           .setMaxRelativeExpiryMs(maxExpiry));
       RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
-      assertTrue("No cache pools found", pit.hasNext());
+      assertTrue(pit.hasNext(), "No cache pools found");
       CachePoolInfo info = pit.next().getInfo();
       assertEquals(pool, info.getPoolName());
       assertEquals(groupName, info.getGroupName());
       assertEquals(mode, info.getMode());
       assertEquals(limit, (long)info.getLimit());
       assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs());
-      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+      assertFalse(pit.hasNext(), "Unexpected # of cache pools found");
     
       // Create some cache entries
       int numEntries = 10;
@@ -675,13 +670,13 @@
       RemoteIterator<CacheDirectiveEntry> dit
           = dfs.listCacheDirectives(null);
       for (int i=0; i<numEntries; i++) {
-        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        assertTrue(dit.hasNext(), "Unexpected # of cache entries: " + i);
         CacheDirectiveInfo cd = dit.next().getInfo();
         assertEquals(i+1, cd.getId().longValue());
         assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
         assertEquals(pool, cd.getPool());
       }
-      assertFalse("Unexpected # of cache directives found", dit.hasNext());
+      assertFalse(dit.hasNext(), "Unexpected # of cache directives found");
       
       // Checkpoint once to set some cache pools and directives on 2NN side
       secondary.doCheckpoint();
@@ -699,8 +694,8 @@
 
       // Checkpoint again forcing a reload of FSN state
       boolean fetchImage = secondary.doCheckpoint();
-      assertTrue("Secondary should have fetched a new fsimage from NameNode",
-          fetchImage);
+      assertTrue(fetchImage,
+          "Secondary should have fetched a new fsimage from NameNode");
 
       // Remove temp pool and directive
       dfs.removeCachePool(imagePool);
@@ -710,7 +705,7 @@
     
       // Check that state came back up
       pit = dfs.listCachePools();
-      assertTrue("No cache pools found", pit.hasNext());
+      assertTrue(pit.hasNext(), "No cache pools found");
       info = pit.next().getInfo();
       assertEquals(pool, info.getPoolName());
       assertEquals(pool, info.getPoolName());
@@ -718,18 +713,18 @@
       assertEquals(mode, info.getMode());
       assertEquals(limit, (long)info.getLimit());
       assertEquals(maxExpiry, (long)info.getMaxRelativeExpiryMs());
-      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+      assertFalse(pit.hasNext(), "Unexpected # of cache pools found");
     
       dit = dfs.listCacheDirectives(null);
       for (int i=0; i<numEntries; i++) {
-        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        assertTrue(dit.hasNext(), "Unexpected # of cache entries: " + i);
         CacheDirectiveInfo cd = dit.next().getInfo();
         assertEquals(i+1, cd.getId().longValue());
         assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
         assertEquals(pool, cd.getPool());
         assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
       }
-      assertFalse("Unexpected # of cache directives found", dit.hasNext());
+      assertFalse(dit.hasNext(), "Unexpected # of cache directives found");
   
       long nextId = dfs.addCacheDirective(
             new CacheDirectiveInfo.Builder().
@@ -817,7 +812,7 @@
           fail("got IOException while calling " +
               "listCacheDirectives: " + e.getMessage());
         }
-        Assert.assertNotNull(entry);
+        Assertions.assertNotNull(entry);
         CacheDirectiveStats stats = entry.getStats();
         if ((targetBytesNeeded == stats.getBytesNeeded()) &&
             (targetBytesCached == stats.getBytesCached()) &&
@@ -914,8 +909,8 @@
       // round it up to full blocks
       final long numBlocks = (len + blockSize - 1) / blockSize;
       BlockLocation[] locs = dfs.getFileBlockLocations(p, 0, len);
-      assertEquals("Unexpected number of block locations for path " + p,
-          numBlocks, locs.length);
+      assertEquals(numBlocks, locs.length,
+          "Unexpected number of block locations for path " + p);
       for (BlockLocation l: locs) {
         if (l.getCachedHosts().length > 0) {
           numCachedBlocks++;
@@ -926,10 +921,10 @@
     LOG.info("Found " + numCachedBlocks + " of " + expectedBlocks + " blocks");
     LOG.info("Found " + numCachedReplicas + " of " + expectedReplicas
         + " replicas");
-    assertEquals("Unexpected number of cached blocks", expectedBlocks,
-        numCachedBlocks);
-    assertEquals("Unexpected number of cached replicas", expectedReplicas,
-        numCachedReplicas);
+    assertEquals(expectedBlocks, numCachedBlocks,
+        "Unexpected number of cached blocks");
+    assertEquals(expectedReplicas, numCachedReplicas,
+        "Unexpected number of cached replicas");
   }
 
   @Test(timeout=120000)
@@ -985,17 +980,17 @@
 
     // Check that the datanodes have the right cache values
     DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
-    assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
+    assertEquals(NUM_DATANODES, live.length, "Unexpected number of live nodes");
     long totalUsed = 0;
     for (DatanodeInfo dn : live) {
       final long cacheCapacity = dn.getCacheCapacity();
       final long cacheUsed = dn.getCacheUsed();
       final long cacheRemaining = dn.getCacheRemaining();
-      assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
-      assertEquals("Capacity not equal to used + remaining",
-          cacheCapacity, cacheUsed + cacheRemaining);
-      assertEquals("Remaining not equal to capacity - used",
-          cacheCapacity - cacheUsed, cacheRemaining);
+      assertEquals(CACHE_CAPACITY, cacheCapacity, "Unexpected cache capacity");
+      assertEquals(cacheCapacity, cacheUsed + cacheRemaining,
+          "Capacity not equal to used + remaining");
+      assertEquals(cacheCapacity - cacheUsed, cacheRemaining,
+          "Remaining not equal to capacity - used");
       totalUsed += cacheUsed;
     }
     assertEquals(expected*BLOCK_SIZE, totalUsed);
@@ -1174,11 +1169,11 @@
     RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
     CachePoolInfo info = it.next().getInfo();
     assertFalse(it.hasNext());
-    assertEquals("Expected pool name", poolName, info.getPoolName());
-    assertNull("Unexpected owner name", info.getOwnerName());
-    assertNull("Unexpected group name", info.getGroupName());
-    assertNull("Unexpected mode", info.getMode());
-    assertNull("Unexpected limit", info.getLimit());
+    assertEquals(poolName, info.getPoolName(), "Expected pool name");
+    assertNull(info.getOwnerName(), "Unexpected owner name");
+    assertNull(info.getGroupName(), "Unexpected group name");
+    assertNull(info.getMode(), "Unexpected mode");
+    assertNull(info.getLimit(), "Unexpected limit");
     // Modify the pool so myuser is now the owner
     final long limit = 99;
     dfs.modifyCachePool(new CachePoolInfo(poolName)
@@ -1188,13 +1183,13 @@
     it = myDfs.listCachePools();
     info = it.next().getInfo();
     assertFalse(it.hasNext());
-    assertEquals("Expected pool name", poolName, info.getPoolName());
-    assertEquals("Mismatched owner name", myUser.getShortUserName(),
-        info.getOwnerName());
-    assertNotNull("Expected group name", info.getGroupName());
-    assertEquals("Mismatched mode", (short) 0700,
-        info.getMode().toShort());
-    assertEquals("Mismatched limit", limit, (long)info.getLimit());
+    assertEquals(poolName, info.getPoolName(), "Expected pool name");
+    assertEquals(myUser.getShortUserName(), info.getOwnerName(),
+        "Mismatched owner name");
+    assertNotNull(info.getGroupName(), "Expected group name");
+    assertEquals((short) 0700, info.getMode().toShort(),
+        "Mismatched mode");
+    assertEquals(limit, (long) info.getLimit(), "Mismatched limit");
   }
 
   @Test(timeout=120000)
@@ -1221,8 +1216,8 @@
     CacheDirectiveEntry ent = it.next();
     assertFalse(it.hasNext());
     Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
-    assertTrue("Directive should have expired",
-        entryExpiry.before(new Date()));
+    assertTrue(entryExpiry.before(new Date()),
+        "Directive should have expired");
     // Change it back to expire later
     dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
         .setExpiration(Expiration.newRelative(120000)).build());
@@ -1231,8 +1226,8 @@
     ent = it.next();
     assertFalse(it.hasNext());
     entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
-    assertTrue("Directive should not have expired",
-        entryExpiry.after(new Date()));
+    assertTrue(entryExpiry.after(new Date()),
+        "Directive should not have expired");
     // Verify that setting a negative TTL throws an error
     try {
       dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
@@ -1293,10 +1288,10 @@
         1, 0,
         poolInfo, "testLimit:2");
     RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
-    assertTrue("Expected a cache pool", it.hasNext());
+    assertTrue(it.hasNext(), "Expected a cache pool");
     CachePoolStats stats = it.next().getStats();
-    assertEquals("Overlimit bytes should be difference of needed and limit",
-        BLOCK_SIZE, stats.getBytesOverlimit());
+    assertEquals(BLOCK_SIZE, stats.getBytesOverlimit(),
+        "Overlimit bytes should be difference of needed and limit");
     // Moving a directive to a pool without enough limit should fail
     CachePoolInfo inadequate =
         new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
@@ -1340,9 +1335,9 @@
     dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
     RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
     CachePoolInfo listPool = poolIt.next().getInfo();
-    assertFalse("Should only be one pool", poolIt.hasNext());
-    assertEquals("Expected max relative expiry to match set value",
-        poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
+    assertFalse(poolIt.hasNext(), "Should only be one pool");
+    assertEquals(poolExpiration, listPool.getMaxRelativeExpiryMs().longValue(),
+        "Expected max relative expiry to match set value");
     // Test that negative and really big max expirations can't be modified
     try {
       dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
@@ -1366,11 +1361,11 @@
     RemoteIterator<CacheDirectiveEntry> dirIt =
         dfs.listCacheDirectives(defaultExpiry);
     CacheDirectiveInfo listInfo = dirIt.next().getInfo();
-    assertFalse("Should only have one entry in listing", dirIt.hasNext());
+    assertFalse(dirIt.hasNext(), "Should only have one entry in listing");
     long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
         - new Date().getTime();
-    assertTrue("Directive expiry should be approximately the pool's max expiry",
-        Math.abs(listExpiration - poolExpiration) < 10*1000);
+    assertTrue(Math.abs(listExpiration - poolExpiration) < 10 * 1000,
+        "Directive expiry should be approximately the pool's max expiry");
     // Test that the max is enforced on add for relative and absolute
     CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
         .setPath(new Path("/lolcat"))
@@ -1474,9 +1469,9 @@
     listInfo = dirIt.next().getInfo();
     listExpiration = listInfo.getExpiration().getAbsoluteMillis()
         - new Date().getTime();
-    assertTrue("Unexpected relative expiry " + listExpiration
-        + " expected approximately " + poolExpiration/2,
-        Math.abs(poolExpiration/2 - listExpiration) < 10*1000);
+    assertTrue(Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000,
+        "Unexpected relative expiry " + listExpiration
+        + " expected approximately " + poolExpiration / 2);
     // Test that cache pool and directive expiry can be modified back to never
     dfs.modifyCachePool(destPool
         .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
@@ -1485,9 +1480,9 @@
     while (!listPool.getPoolName().equals(destPool.getPoolName())) {
       listPool = poolIt.next().getInfo();
     }
-    assertEquals("Expected max relative expiry to match set value",
-        CachePoolInfo.RELATIVE_EXPIRY_NEVER,
-        listPool.getMaxRelativeExpiryMs().longValue());
+    assertEquals(CachePoolInfo.RELATIVE_EXPIRY_NEVER,
+        listPool.getMaxRelativeExpiryMs().longValue(),
+        "Expected max relative expiry to match set value");
     dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
         .setId(listInfo.getId())
         .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER))
@@ -1512,10 +1507,10 @@
       for (DataNode dn : cluster.getDataNodes()) {
         DatanodeDescriptor descriptor =
             datanodeManager.getDatanode(dn.getDatanodeId());
-        Assert.assertTrue("Pending cached list of " + descriptor +
-                " is not empty, "
-                + Arrays.toString(descriptor.getPendingCached().toArray()), 
-            descriptor.getPendingCached().isEmpty());
+        Assertions.assertTrue(descriptor.getPendingCached().isEmpty(),
+            "Pending cached list of " + descriptor +
+            " is not empty, "
+            + Arrays.toString(descriptor.getPendingCached().toArray()));
       }
     } finally {
       cluster.getNamesystem().readUnlock();
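
Note that TestCacheDirectives keeps `import org.junit.Test;` (as does TestCheckpoint below): its test methods are declared as @Test(timeout=120000), and the Jupiter @Test annotation has no timeout attribute, so those methods stay on the old annotation for now. If they are converted later, one option, sketched here only as an assumed follow-up and not something this patch does, is the Jupiter @Timeout annotation:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class TimeoutExample {
      @Test
      @Timeout(value = 120, unit = TimeUnit.SECONDS)  // replaces @Test(timeout=120000)
      public void boundedTest() throws InterruptedException {
        Thread.sleep(10);  // must complete within 120 seconds
      }
    }
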
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
index cff4e1f..0f27c08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 
@@ -36,7 +34,7 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the creation and validation of a checkpoint.
@@ -90,8 +88,8 @@
         assertTrue(log.isInProgress());
         log.scanLog(Long.MAX_VALUE, true);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
-        assertEquals("In-progress log " + log + " should have 5 transactions",
-                     5, numTransactions);;
+        assertEquals(5, numTransactions,
+            "In-progress log " + log + " should have 5 transactions");
       }
 
       // Saving image in safe mode should succeed
@@ -107,8 +105,8 @@
         assertTrue(log.isInProgress());
         log.scanLog(Long.MAX_VALUE, true);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
-        assertEquals("In-progress log " + log + " should only have START txn",
-            1, numTransactions);
+        assertEquals(1, numTransactions,
+            "In-progress log " + log + " should only have START txn");
       }
 
       // restart cluster
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 064d5ae..fbd3766 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -24,13 +24,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
 import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -89,9 +83,9 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -128,14 +122,14 @@
 
   private CheckpointFaultInjector faultInjector;
     
-  @Before
+  @BeforeEach
   public void setUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
     faultInjector = Mockito.mock(CheckpointFaultInjector.class);
     CheckpointFaultInjector.instance = faultInjector;
   }
   
-  @After
+  @AfterEach
   public void checkForSNNThreads() {
     GenericTestUtils.assertNoThreadsMatching(".*SecondaryNameNode.*");
   }
@@ -144,7 +138,7 @@
     throws IOException {
     assertTrue(fileSys.exists(name));
     int replication = fileSys.getFileStatus(name).getReplication();
-    assertEquals("replication for " + name, repl, replication);
+    assertEquals(repl, replication, "replication for " + name);
     //We should probably test for more of the file properties.    
   }
   
@@ -201,29 +195,29 @@
     ArrayList<URI> editsDirs = new ArrayList<URI>();
     File filePath =
       new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
-    assertTrue("Couldn't create directory storageDirToCheck",
-               filePath.exists() || filePath.mkdirs());
+    assertTrue(filePath.exists() || filePath.mkdirs(),
+        "Couldn't create directory storageDirToCheck");
     fsImageDirs.add(filePath.toURI());
     editsDirs.add(filePath.toURI());
     NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
       fsImageDirs, editsDirs);
     try {
-      assertTrue("List of storage directories didn't have storageDirToCheck.",
-                 nnStorage.getEditsDirectories().iterator().next().
-                 toString().indexOf("storageDirToCheck") != -1);
-      assertTrue("List of removed storage directories wasn't empty",
-                 nnStorage.getRemovedStorageDirs().isEmpty());
+      assertTrue(nnStorage.getEditsDirectories().iterator().next().
+          toString().indexOf("storageDirToCheck") != -1,
+          "List of storage directories didn't have storageDirToCheck.");
+      assertTrue(nnStorage.getRemovedStorageDirs().isEmpty(),
+          "List of removed storage directories wasn't empty");
     } finally {
-      // Delete storage directory to cause IOException in writeTransactionIdFile 
-      assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
-                 filePath.delete());
+      // Delete storage directory to cause IOException in writeTransactionIdFile
+      assertTrue(filePath.delete(),
+          "Couldn't remove directory " + filePath.getAbsolutePath());
     }
     // Just call writeTransactionIdFile using any random number
     nnStorage.writeTransactionIdFileToStorage(1);
     List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
-    assertTrue("Removed directory wasn't what was expected",
-               listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
-               toString().indexOf("storageDirToCheck") != -1);
+    assertTrue(listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
+        toString().indexOf("storageDirToCheck") != -1,
+        "Removed directory wasn't what was expected");
     nnStorage.close();
   }
 
@@ -266,9 +260,9 @@
       // The error must be recorded, so next checkpoint will reload image.
       fos.write(new byte[] { 0, 1, 2, 3 });
       fos.hsync();
-      
-      assertTrue("Another checkpoint should have reloaded image",
-          secondary.doCheckpoint());
+
+      assertTrue(secondary.doCheckpoint(),
+          "Another checkpoint should have reloaded image");
     } finally {
       if (fs != null) {
         fs.close();
@@ -315,7 +309,7 @@
     } catch (ExitException ee) {
       // ignore
       ExitUtil.resetFirstExitException();
-      assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
+      assertEquals(1, secondary.getMergeErrorCount() - 1, "Max retries");
     } finally {
       if (fs != null) {
         fs.close();
@@ -830,8 +824,8 @@
       savedSd.lock();
       try {
         secondary = startSecondaryNameNode(conf);
-        assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
-            savedSd.isLockSupported());
+        assertFalse(savedSd.isLockSupported(),
+            "Should fail to start 2NN when " + savedSd + " is locked");
       } catch (IOException ioe) {
         GenericTestUtils.assertExceptionContains("already locked", ioe);
       } finally {
@@ -876,8 +870,8 @@
           " " + ManagementFactory.getRuntimeMXBean().getName();
         String expectedLogMessage = "It appears that another node "
           + lockingJvmName + " has already locked the storage directory";
-        assertTrue("Log output does not contain expected log message: "
-          + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
+        assertTrue(logs.getOutput().contains(expectedLogMessage),
+            "Log output does not contain expected log message: " + expectedLogMessage);
       }
     } finally {
       cleanup(cluster);
@@ -913,8 +907,8 @@
     try {      
       cluster = new MiniDFSCluster.Builder(conf).format(false)
           .manageNameDfsDirs(false).numDataNodes(0).build();
-      assertFalse("cluster should fail to start after locking " +
-          sdToLock, sdToLock.isLockSupported());
+      assertFalse(sdToLock.isLockSupported(),
+          "cluster should fail to start after locking " + sdToLock);
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains("already locked", ioe);
     } finally {
@@ -982,9 +976,9 @@
     try {
       cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
           .startupOption(StartupOption.IMPORT).build();
-      
-      assertTrue("Path from checkpoint should exist after import",
-          cluster.getFileSystem().exists(testPath));
+
+      assertTrue(cluster.getFileSystem().exists(testPath),
+          "Path from checkpoint should exist after import");
 
       // Make sure that the image got saved on import
       FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
@@ -1220,8 +1214,8 @@
         File savedImage = new File(imageDir, "current/"
                                    + NNStorage.getImageFileName(
                                        EXPECTED_TXNS_FIRST_SEG));
-        assertTrue("Should have saved image at " + savedImage,
-            savedImage.exists());        
+        assertTrue(savedImage.exists(),
+            "Should have saved image at " + savedImage);
       }
 
       // restart cluster and verify file exists
@@ -1264,7 +1258,7 @@
       sig.clusterID = "somerandomcid";
       try {
         sig.validateStorageInfo(nn.getFSImage()); // this should fail
-        assertTrue("This test is expected to fail.", false);
+        assertTrue(false, "This test is expected to fail.");
       } catch (Exception ignored) {
       }
     } finally {
@@ -1433,36 +1427,36 @@
           NNStorage.getImageFileName(expectedTxIdToDownload));
       File secondaryFsImageAfter = new File(secondaryCurrent,
           NNStorage.getImageFileName(expectedTxIdToDownload + 2));
-      
-      assertFalse("Secondary should start with empty current/ dir " +
-          "but " + secondaryFsImageBefore + " exists",
-          secondaryFsImageBefore.exists());
 
-      assertTrue("Secondary should have loaded an image",
-          secondary.doCheckpoint());
-      
-      assertTrue("Secondary should have downloaded original image",
-          secondaryFsImageBefore.exists());
-      assertTrue("Secondary should have created a new image",
-          secondaryFsImageAfter.exists());
+      assertFalse(secondaryFsImageBefore.exists(),
+          "Secondary should start with empty current/ dir " +
+          "but " + secondaryFsImageBefore + " exists");
+
+      assertTrue(secondary.doCheckpoint(),
+          "Secondary should have loaded an image");
+
+      assertTrue(secondaryFsImageBefore.exists(),
+          "Secondary should have downloaded original image");
+      assertTrue(secondaryFsImageAfter.exists(),
+          "Secondary should have created a new image");
       
       long fsimageLength = secondaryFsImageBefore.length();
-      assertEquals("Image size should not have changed",
-          fsimageLength,
-          secondaryFsImageAfter.length());
+      assertEquals(fsimageLength,
+          secondaryFsImageAfter.length(),
+          "Image size should not have changed");
 
       // change namespace
       fileSys.mkdirs(dir);
-      
-      assertFalse("Another checkpoint should not have to re-load image",
-          secondary.doCheckpoint());
+
+      assertFalse(secondary.doCheckpoint(),
+          "Another checkpoint should not have to re-load image");
       
       for (StorageDirectory sd :
         image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
         File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
             expectedTxIdToDownload + 5);
-        assertTrue("Image size increased",
-            imageFile.length() > fsimageLength);
+        assertTrue(imageFile.length() > fsimageLength,
+            "Image size increased");
       }
 
     } finally {
@@ -1559,9 +1553,9 @@
       for (StorageDirectory sd : secondary.getFSImage().getStorage()
           .dirIterable(NameNodeDirType.EDITS)) {
         File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
-        assertTrue(
-            "Expected a single tmp edits file in directory " + sd.toString(),
-            tmpEdits.length == 1);
+        assertTrue(
+            tmpEdits.length == 1,
+            "Expected a single tmp edits file in directory " + sd.toString());
         RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw");
         randFile.setLength(0);
         randFile.close();
@@ -1673,9 +1667,9 @@
       for (StorageDirectory sd : secondary.getFSImage().getStorage()
           .dirIterable(NameNodeDirType.EDITS)) {
         File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
-        assertTrue(
-            "Expected a single tmp edits file in directory " + sd.toString(),
-            tmpEdits.length == 1);
+        assertTrue(
+            tmpEdits.length == 1,
+            "Expected a single tmp edits file in directory " + sd.toString());
       }
       // Restart 2NN
       secondary.shutdown();
@@ -1684,9 +1678,9 @@
       for (StorageDirectory sd : secondary.getFSImage().getStorage()
           .dirIterable(NameNodeDirType.EDITS)) {
         File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
-        assertTrue(
-            "Did not expect a tmp edits file in directory " + sd.toString(),
-            tmpEdits.length == 0);
+        assertTrue(
+            tmpEdits.length == 0,
+            "Did not expect a tmp edits file in directory " + sd.toString());
       }
       // Next checkpoint should succeed
       secondary.doCheckpoint();
@@ -2001,7 +1995,7 @@
         fail("Storage info was not verified");
       } catch (IOException ioe) {
         String msg = StringUtils.stringifyException(ioe);
-        assertTrue(msg, msg.contains("but the secondary expected"));
+        assertTrue(msg.contains("but the secondary expected"), msg);
       }
 
       try {
@@ -2009,7 +2003,7 @@
         fail("Storage info was not verified");
       } catch (IOException ioe) {
         String msg = StringUtils.stringifyException(ioe);
-        assertTrue(msg, msg.contains("but the secondary expected"));
+        assertTrue(msg.contains("but the secondary expected"), msg);
       }
 
       try {
@@ -2018,7 +2012,7 @@
         fail("Storage info was not verified");
       } catch (IOException ioe) {
         String msg = StringUtils.stringifyException(ioe);
-        assertTrue(msg, msg.contains("but the secondary expected"));
+        assertTrue(msg.contains("but the secondary expected"), msg);
       }
     } finally {
       cleanup(cluster);
@@ -2279,8 +2273,8 @@
       for (File checkpointDir : checkpointDirs) {
         List<EditLogFile> editsFiles = FileJournalManager.matchEditLogs(
             checkpointDir);
-        assertEquals("Edit log files were not purged from 2NN", 1,
-            editsFiles.size());
+        assertEquals(1, editsFiles.size(),
+            "Edit log files were not purged from 2NN");
       }
       
     } finally {
@@ -2443,14 +2437,14 @@
       // Checkpoint once
       secondary.doCheckpoint();
       String files1[] = tmpDir.list();
-      assertEquals("Only one file is expected", 1, files1.length);
+      assertEquals(1, files1.length, "Only one file is expected");
 
       // Perform more checkpointngs and check whether retention management
       // is working.
       secondary.doCheckpoint();
       secondary.doCheckpoint();
       String files2[] = tmpDir.list();
-      assertEquals("Two files are expected", 2, files2.length);
+      assertEquals(2, files2.length, "Two files are expected");
 
       // Verify that the first file is deleted.
       for (String fName : files2) {
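
A side note on the assertTrue(false, e.getLocalizedMessage()) and assertTrue(false, "...") calls that survive the conversion in TestBackupNode and TestCheckpoint above: they are direct translations of the old assertTrue(message, false) idiom. A possible cleanup, kept out of this mechanical migration and shown here only as a sketch, is Assertions.fail with the same message:

    import static org.junit.jupiter.api.Assertions.fail;

    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    public class FailInsteadOfAssertTrueFalse {
      @Test
      public void reportUnexpectedException() {
        try {
          mightThrow();
        } catch (IOException e) {
          // Equivalent to assertTrue(false, e.getLocalizedMessage()), but clearer.
          fail(e.getLocalizedMessage());
        }
      }

      private void mightThrow() throws IOException {
        // hypothetical operation under test
      }
    }
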
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java
index 829680e..386b61c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClientNameNodeAddress.java
@@ -21,7 +21,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -29,7 +29,8 @@
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 
 /**
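
TestClientNameNodeAddress needs one extra import because the Jupiter Assertions class has no assertThat: the Hamcrest matcher assertions now come directly from org.hamcrest.MatcherAssert, as the hunk above shows. A minimal hypothetical sketch of the combination:

    import static org.hamcrest.CoreMatchers.is;
    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.junit.jupiter.api.Assertions.assertNull;

    import org.junit.jupiter.api.Test;

    public class AssertThatExample {
      @Test
      public void hamcrestMatchersStillWork() {
        String unset = null;
        assertNull(unset, "Jupiter assertion, message last");
        assertThat(2 + 2, is(4));  // Hamcrest's own assertThat, no JUnit involvement
      }
    }

The class also still imports org.junit.Rule and org.junit.rules.Timeout; a JUnit 4 @Rule is not applied by the Jupiter engine, so that rule-based timeout will likely need a Jupiter equivalent (for example @Timeout) in a follow-up.
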
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index 0e648e3..e1b5520 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -49,9 +46,9 @@
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestClusterId {
   private static final Logger LOG =
@@ -77,7 +74,7 @@
     return cid;
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     ExitUtil.disableSystemExit();
 
@@ -98,7 +95,7 @@
     config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
       throw new IOException("Could not tearDown test directory '" + hdfsDir
@@ -114,21 +111,21 @@
     NameNode.format(config);
     // see if cluster id not empty.
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")) );
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
 
     // 2. successful format with given clusterid
     StartupOption.FORMAT.setClusterId("mycluster");
     NameNode.format(config);
     // see if cluster id matches with given clusterid.
     cid = getClusterId(config);
-    assertTrue("ClusterId didn't match", cid.equals("mycluster"));
+    assertTrue(cid.equals("mycluster"), "ClusterId didn't match");
 
     // 3. format without any clusterid again. It should generate new
     //clusterid.
     StartupOption.FORMAT.setClusterId("");
     NameNode.format(config);
     String newCid = getClusterId(config);
-    assertFalse("ClusterId should not be the same", newCid.equals(cid));
+    assertFalse(newCid.equals(cid), "ClusterId should not be the same");
   }
 
   /**
@@ -143,11 +140,11 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
   }
 
   /**
@@ -168,11 +165,11 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
   }
 
   /**
@@ -193,11 +190,11 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
   }
 
   /**
@@ -219,11 +216,11 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     String cId = getClusterId(config);
-    assertEquals("ClusterIds do not match", myId, cId);
+    assertEquals(myId, cId, "ClusterIds do not match");
   }
 
   /**
@@ -251,7 +248,7 @@
 
     // check if the version file does not exists.
     File version = new File(hdfsDir, "current/VERSION");
-    assertFalse("Check version should not exist", version.exists());
+    assertFalse(version.exists(), "Check version should not exist");
   }
 
   /**
@@ -279,7 +276,7 @@
 
     // check if the version file does not exists.
     File version = new File(hdfsDir, "current/VERSION");
-    assertFalse("Check version should not exist", version.exists());
+    assertFalse(version.exists(), "Check version should not exist");
   }
 
   /**
@@ -308,7 +305,7 @@
 
     // check if the version file does not exists.
     File version = new File(hdfsDir, "current/VERSION");
-    assertFalse("Check version should not exist", version.exists());
+    assertFalse(version.exists(), "Check version should not exist");
   }
 
   /**
@@ -331,13 +328,13 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have been aborted with exit code 1", 1,
-          e.status);
+      assertEquals(1, e.status,
+          "Format should have been aborted with exit code 1");
     }
 
     // check if the version file does not exists.
     File version = new File(hdfsDir, "current/VERSION");
-    assertFalse("Check version should not exist", version.exists());
+    assertFalse(version.exists(), "Check version should not exist");
   }
 
   /**
@@ -355,11 +352,11 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
   }
 
   /**
@@ -380,11 +377,11 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
   }
 
   /**
@@ -415,13 +412,13 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should have succeeded", 0, e.status);
+      assertEquals(0, e.status, "Format should have succeeded");
     }
 
     System.setIn(origIn);
 
     String cid = getClusterId(config);
-    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+    assertTrue((cid != null && !cid.equals("")), "Didn't get new ClusterId");
   }
 
   /**
@@ -451,14 +448,14 @@
       NameNode.createNameNode(argv, config);
       fail("createNameNode() did not call System.exit()");
     } catch (ExitException e) {
-      assertEquals("Format should not have succeeded", 1, e.status);
+      assertEquals(1, e.status, "Format should not have succeeded");
     }
 
     System.setIn(origIn);
 
     // check if the version file does not exists.
     File version = new File(hdfsDir, "current/VERSION");
-    assertFalse("Check version should not exist", version.exists());
+    assertFalse(version.exists(), "Check version should not exist");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index fd9fee5..a5e8e5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -27,11 +27,11 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.*;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java
index f7b1ea5..2ce4683 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockWithInvalidGenStamp.java
@@ -29,10 +29,10 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
@@ -43,7 +43,7 @@
   private FSDirectory dir;
   private DistributedFileSystem dfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     final Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -54,7 +54,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -83,9 +83,9 @@
       try{
         dfs.getClient().getNamenode().complete(file.toString(),
             dfs.getClient().getClientName(), previous, fileNode.getId());
-        Assert.fail("should throw exception because invalid genStamp");
+        Assertions.fail("should throw exception because invalid genStamp");
       } catch (IOException e) {
-        Assert.assertTrue(e.toString().contains(
+        Assertions.assertTrue(e.toString().contains(
             "Commit block with mismatching GS. NN has " +
             newBlock + ", client submits " + newBlockClone));
       }
@@ -93,7 +93,7 @@
           newBlock);
       boolean complete =  dfs.getClient().getNamenode().complete(file.toString(),
       dfs.getClient().getClientName(), previous, fileNode.getId());
-      Assert.assertTrue("should complete successfully", complete);
+      Assertions.assertTrue(complete, "should complete successfully");
     } finally {
       IOUtils.cleanupWithLogger(null, out);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCreateEditsLog.java
index d3527f5..7920e48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCreateEditsLog.java
@@ -17,16 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 
 import java.io.File;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
@@ -48,13 +47,13 @@
 
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     deleteIfExists(HDFS_DIR);
     deleteIfExists(TEST_DIR);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 01ea148..dc8aefd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -19,10 +19,7 @@
 
 import java.util.function.Supplier;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.HashSet;
@@ -56,8 +53,8 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test to ensure requests from dead datnodes are rejected by namenode with
@@ -68,7 +65,7 @@
       LoggerFactory.getLogger(TestDeadDatanode.class);
   private MiniDFSCluster cluster;
 
-  @After
+  @AfterEach
   public void cleanup() {
     if (cluster != null) {
       cluster.shutdown();
@@ -179,8 +176,8 @@
         clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
         BlockType.CONTIGUOUS, null, null);
     for (DatanodeStorageInfo datanodeStorageInfo : results) {
-      assertFalse("Dead node should not be chosen", datanodeStorageInfo
-          .getDatanodeDescriptor().equals(clientNode));
+      assertFalse(datanodeStorageInfo.getDatanodeDescriptor()
+          .equals(clientNode), "Dead node should not be chosen");
     }
   }
 
@@ -206,11 +203,11 @@
           .getDatanode(dn2.getDatanodeId());
       dn1.setHeartbeatsDisabledForTests(true);
       cluster.setDataNodeDead(dn1.getDatanodeId());
-      assertEquals("Capacity shouldn't include DeadNode", dn2Desc.getCapacity(),
-          cluster.getNamesystem(0).getCapacityTotal());
-      assertEquals("NonDFS-used shouldn't include DeadNode",
-          dn2Desc.getNonDfsUsed(),
-          cluster.getNamesystem(0).getNonDfsUsedSpace());
+      assertEquals(dn2Desc.getCapacity(),
+          cluster.getNamesystem(0).getCapacityTotal(),
+          "Capacity shouldn't include DeadNode");
+      assertEquals(dn2Desc.getNonDfsUsed(),
+          cluster.getNamesystem(0).getNonDfsUsedSpace(),
+          "NonDFS-used shouldn't include DeadNode");
       // Wait for re-registration and heartbeat
       dn1.setHeartbeatsDisabledForTests(false);
       final DatanodeDescriptor dn1Desc = cluster.getNamesystem(0)
@@ -222,11 +219,11 @@
           return dn1Desc.isAlive() && dn1Desc.isHeartbeatedSinceRegistration();
         }
       }, 100, 5000);
-      assertEquals("Capacity should be 0 after all DNs dead", initialCapacity,
-          cluster.getNamesystem(0).getCapacityTotal());
+      assertEquals(initialCapacity,
+          cluster.getNamesystem(0).getCapacityTotal(),
+          "Capacity should be 0 after all DNs dead");
       long nonDfsAfterReg = cluster.getNamesystem(0).getNonDfsUsedSpace();
-      assertEquals("NonDFS should include actual DN NonDFSUsed",
-          dn1Desc.getNonDfsUsed() + dn2Desc.getNonDfsUsed(), nonDfsAfterReg);
+      assertEquals(dn1Desc.getNonDfsUsed() + dn2Desc.getNonDfsUsed(),
+          nonDfsAfterReg, "NonDFS should include actual DN NonDFSUsed");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index e8bd837..c48c279 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.io.PrintStream;
@@ -60,9 +60,9 @@
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -126,13 +126,13 @@
         .setHeartbeatExpireInterval(3000);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     setupConfig();
     createCluster();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (hostsFileWriter != null) {
       hostsFileWriter.cleanup();
@@ -218,21 +218,21 @@
           count++;
         }
       }
-      assertTrue("No decommissioning output", num != null);
-      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
-          num.intValue());
-      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
-          count);
+      assertTrue(num != null, "No decommissioning output");
+      assertEquals(expectedDecomm.size(), num.intValue(),
+          "Unexpected number of decomming DNs");
+      assertEquals(expectedDecomm.size(), count,
+          "Unexpected number of decomming DNs");
 
       // Check Java API for correct contents
       List<DatanodeInfo> decomming =
           new ArrayList<DatanodeInfo>(Arrays.asList(dfs
               .getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
-      assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
-          decomming.size());
+      assertEquals(expectedDecomm.size(), decomming.size(),
+          "Unexpected number of decomming DNs");
       for (DatanodeID id : expectedDecomm) {
-        assertTrue("Did not find expected decomming DN " + id,
-            decomming.contains(id));
+        assertTrue(decomming.contains(id),
+            "Did not find expected decomming DN " + id);
       }
     } finally {
       System.setOut(oldOut);
@@ -265,7 +265,7 @@
         .getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", 2, info.length);
+    assertEquals(2, info.length, "Number of Datanodes ");
     DistributedFileSystem fileSys = cluster.getFileSystem();
     DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
 
@@ -434,13 +434,13 @@
     BlockManagerTestUtil.recheckDecommissionState(dm);
     // Block until the admin's monitor updates the number of tracked nodes.
     waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 1);
-    // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
-    assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
-        dead.get(0).isDecommissionInProgress());
+    // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
+    assertTrue(dead.get(0).isDecommissionInProgress(),
+        "the node should be DECOMMISSION_IN_PROGRESS");
     // Check DatanodeManager#getDecommissionNodes, make sure it returns
     // the node as decommissioning, even if it's dead
     List<DatanodeDescriptor> decomlist = dm.getDecommissioningNodes();
-    assertTrue("The node should be be decommissioning", decomlist.size() == 1);
+    assertTrue(decomlist.size() == 1, "The node should be decommissioning");
     
     // Delete the under-replicated file, which should let the 
     // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
@@ -448,8 +448,8 @@
     BlockManagerTestUtil.recheckDecommissionState(dm);
     // Block until the admin's monitor updates the number of tracked nodes.
     waitForDecommissionedNodes(dm.getDatanodeAdminManager(), 0);
-    assertTrue("the node should be decommissioned",
-        dead.get(0).isDecommissioned());
+    assertTrue(dead.get(0).isDecommissioned(),
+        "the node should be decommissioned");
 
     // Add the node back
     cluster.restartDataNode(dataNodeProperties, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatusWithBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatusWithBackoffMonitor.java
index a68a530..d935250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatusWithBackoffMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatusWithBackoffMonitor.java
@@ -35,12 +35,12 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.net.InetSocketAddress;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Extends the TestDecommissioningStatus class to provide the same set of
@@ -85,7 +85,7 @@
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info =
         client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", 2, info.length);
+    assertEquals(2, info.length, "Number of Datanodes ");
     DistributedFileSystem distFileSys = cluster.getFileSystem();
     DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java
index 447c7eb..2a43075 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java
@@ -19,18 +19,18 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext.DeduplicationMap;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 public class TestDeduplicationMap {
   @Test
   public void testDeduplicationMap() {
     DeduplicationMap<String> m = DeduplicationMap.newMap();
-    Assert.assertEquals(1, m.getId("1"));
-    Assert.assertEquals(2, m.getId("2"));
-    Assert.assertEquals(3, m.getId("3"));
-    Assert.assertEquals(1, m.getId("1"));
-    Assert.assertEquals(2, m.getId("2"));
-    Assert.assertEquals(3, m.getId("3"));
+    Assertions.assertEquals(1, m.getId("1"));
+    Assertions.assertEquals(2, m.getId("2"));
+    Assertions.assertEquals(3, m.getId("3"));
+    Assertions.assertEquals(1, m.getId("1"));
+    Assertions.assertEquals(2, m.getId("2"));
+    Assertions.assertEquals(3, m.getId("3"));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
index 800747a..6031f2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -38,9 +38,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.StaticMapping;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDefaultBlockPlacementPolicy {
 
@@ -51,7 +51,7 @@
   private FSNamesystem namesystem = null;
   private PermissionStatus perm = null;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     StaticMapping.resetMap();
     Configuration conf = new HdfsConfiguration();
@@ -69,7 +69,7 @@
         FsPermission.getDefault());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -156,10 +156,10 @@
     LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine, null,
         null, fileStatus.getFileId(), null, null);
 
-    assertEquals("Block should be allocated sufficient locations",
-        REPLICATION_FACTOR, locatedBlock.getLocations().length);
-    assertEquals("First datanode should be rack local", clientRack,
-        locatedBlock.getLocations()[0].getNetworkLocation());
+    assertEquals(REPLICATION_FACTOR, locatedBlock.getLocations().length,
+        "Block should be allocated sufficient locations");
+    assertEquals(clientRack,
+        locatedBlock.getLocations()[0].getNetworkLocation(),
+        "First datanode should be rack local");
     nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
         src, clientMachine);
   }
@@ -209,12 +209,12 @@
       LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
           null, null, fileStatus.getFileId(), null, null);
 
-      assertEquals("Block should be allocated sufficient locations",
-          REPLICATION_FACTOR, locatedBlock.getLocations().length);
+      assertEquals(REPLICATION_FACTOR, locatedBlock.getLocations().length,
+          "Block should be allocated sufficient locations");
       if (clientRack != null) {
         if (hasBlockReplicaOnRack) {
-          assertEquals("First datanode should be rack local", clientRack,
-              locatedBlock.getLocations()[0].getNetworkLocation());
+          assertEquals(clientRack,
+              locatedBlock.getLocations()[0].getNetworkLocation(),
+              "First datanode should be rack local");
         } else {
           for (DatanodeInfo dni : locatedBlock.getLocations()) {
             assertNotEquals(clientRack, dni.getNetworkLocation());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index 9d32528..52aa136 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -58,17 +58,17 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
 /**
  * Test race between delete and other operations.  For now only addBlock()
@@ -117,7 +117,7 @@
         // write data and syn to make sure a block is allocated.
         out.write(new byte[32], 0, 32);
         out.hsync();
-        Assert.fail("Should have failed.");
+        Assertions.fail("Should have failed.");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(filePath.getName(), e);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 771caef..f0b8e4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 
@@ -50,11 +48,11 @@
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -68,7 +66,7 @@
   private static Configuration conf;
   private static MiniDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -77,7 +75,7 @@
     cluster.waitActive();
   }
 
-  @Before
+  @BeforeEach
   public void resetCluster() throws Exception {
     if (!cluster.isClusterUp()) {
       // Previous test seems to have left cluster in a bad state;
@@ -89,7 +87,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -250,7 +248,7 @@
         .getSpaceConsumed().getStorageSpace();
     try {
       DFSTestUtil.appendFile(getDFS(), file, BLOCKSIZE);
-      Assert.fail("append didn't fail");
+      Assertions.fail("append didn't fail");
     } catch (DSQuotaExceededException e) {
       // ignore
     }
@@ -258,9 +256,9 @@
     LeaseManager lm = cluster.getNamesystem().getLeaseManager();
     // check that the file exists, isn't UC, and has no dangling lease
     INodeFile inode = getFSDirectory().getINode(file.toString()).asFile();
-    Assert.assertNotNull(inode);
-    Assert.assertFalse("should not be UC", inode.isUnderConstruction());
-    Assert.assertNull("should not have a lease", lm.getLease(inode));
+    Assertions.assertNotNull(inode);
+    Assertions.assertFalse(inode.isUnderConstruction(), "should not be UC");
+    Assertions.assertNull(lm.getLease(inode), "should not have a lease");
     // make sure the quota usage is unchanged
     final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getStorageSpace();
@@ -293,7 +291,7 @@
         .getSpaceConsumed().getStorageSpace();
     try {
       DFSTestUtil.appendFile(getDFS(), file, BLOCKSIZE);
-      Assert.fail("append didn't fail");
+      Assertions.fail("append didn't fail");
     } catch (QuotaByStorageTypeExceededException e) {
       //ignore
     }
@@ -301,9 +299,9 @@
     // check that the file exists, isn't UC, and has no dangling lease
     LeaseManager lm = cluster.getNamesystem().getLeaseManager();
     INodeFile inode = getFSDirectory().getINode(file.toString()).asFile();
-    Assert.assertNotNull(inode);
-    Assert.assertFalse("should not be UC", inode.isUnderConstruction());
-    Assert.assertNull("should not have a lease", lm.getLease(inode));
+    Assertions.assertNotNull(inode);
+    Assertions.assertFalse(inode.isUnderConstruction(), "should not be UC");
+    Assertions.assertNull(lm.getLease(inode), "should not have a lease");
     // make sure the quota usage is unchanged
     final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getStorageSpace();
@@ -333,7 +331,7 @@
         .getSpaceConsumed().getStorageSpace();
     try {
       getDFS().truncate(file, BLOCKSIZE / 2 - 1);
-      Assert.fail("truncate didn't fail");
+      Assertions.fail("truncate didn't fail");
     } catch (RemoteException e) {
       assertTrue(e.getClassName().contains("DSQuotaExceededException"));
     }
@@ -341,9 +339,9 @@
     // check that the file exists, isn't UC, and has no dangling lease
     LeaseManager lm = cluster.getNamesystem().getLeaseManager();
     INodeFile inode = getFSDirectory().getINode(file.toString()).asFile();
-    Assert.assertNotNull(inode);
-    Assert.assertFalse("should not be UC", inode.isUnderConstruction());
-    Assert.assertNull("should not have a lease", lm.getLease(inode));
+    Assertions.assertNotNull(inode);
+    Assertions.assertFalse(inode.isUnderConstruction(), "should not be UC");
+    Assertions.assertNull(lm.getLease(inode), "should not have a lease");
     // make sure the quota usage is unchanged
     final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getStorageSpace();
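
The lifecycle conversion in this file maps the JUnit 4 class-level and per-test hooks onto their Jupiter equivalents (@BeforeClass to @BeforeAll, @AfterClass to @AfterAll, @Before to @BeforeEach, @After to @AfterEach). A minimal sketch of that mapping, with placeholder method bodies:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    // Illustrative only: class-level hooks must be static in Jupiter,
    // per-test hooks are instance methods.
    class LifecycleMappingExample {
      @BeforeAll
      static void startSharedFixture() { /* e.g. start a cluster once */ }

      @BeforeEach
      void resetState() { /* runs before every @Test */ }

      @Test
      void example() { }

      @AfterEach
      void perTestCleanup() { }

      @AfterAll
      static void stopSharedFixture() { }
    }
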
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 17803a0..958130fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -23,11 +23,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
@@ -93,7 +89,7 @@
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.spi.LoggingEvent;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -353,8 +349,8 @@
                 1, expectedTxns);
         File editFile1 = NNStorage.getFinalizedEditsFile(it.next(),
                 203, 404);
-        assertTrue("Expect " + editFile + " exists", editFile.exists());
-        assertTrue("Expect " + editFile1 + " exists", editFile1.exists());
+        assertTrue(editFile.exists(), "Expect " + editFile + " exists");
+        assertTrue(editFile1.exists(), "Expect " + editFile1 + " exists");
         EditLogFileInputStream editLogFileInputStream1 =
                 new EditLogFileInputStream(editFile, 1, 202, false);
         EditLogFileInputStream editLogFileInputStream2 =
@@ -454,7 +450,7 @@
     NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
     for (StorageDirectory sd : storage.dirIterable(dirType)) {
       File f = new File(sd.getCurrentDir(), filename);
-      assertTrue("Expect that " + f + " exists", f.exists());
+      assertTrue(f.exists(), "Expect that " + f + " exists");
     }
   }
   
@@ -536,7 +532,7 @@
         
         File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3,
             3 + expectedTxns - 1);
-        assertTrue("Expect " + editFile + " exists", editFile.exists());
+        assertTrue(editFile.exists(), "Expect " + editFile + " exists");
         
         System.out.println("Verifying file: " + editFile);
         long numEdits = loader.loadFSEdits(
@@ -544,10 +540,10 @@
         int numLeases = namesystem.leaseManager.countLease();
         System.out.println("Number of outstanding leases " + numLeases);
         assertEquals(0, numLeases);
-        assertTrue("Verification for " + editFile + " failed. " +
-                   "Expected " + expectedTxns + " transactions. "+
-                   "Found " + numEdits + " transactions.",
-                   numEdits == expectedTxns);
+        assertTrue(numEdits == expectedTxns,
+            "Verification for " + editFile + " failed. " +
+            "Expected " + expectedTxns + " transactions. " +
+            "Found " + numEdits + " transactions.");
   
       }
     } finally {
@@ -617,29 +613,29 @@
       FSImage fsimage = namesystem.getFSImage();
       final FSEditLog editLog = fsimage.getEditLog();
 
-      assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",
-        1, editLog.getSyncTxId());
+      assertEquals(1, editLog.getSyncTxId(),
+          "should start with only the BEGIN_LOG_SEGMENT txn synced");
       
       // Log an edit from thread A
       doLogEdit(threadA, editLog, "thread-a 1");
-      assertEquals("logging edit without syncing should do not affect txid",
-        1, editLog.getSyncTxId());
+      assertEquals(1, editLog.getSyncTxId(),
+          "logging edit without syncing should not affect txid");
 
       // Log an edit from thread B
       doLogEdit(threadB, editLog, "thread-b 1");
-      assertEquals("logging edit without syncing should do not affect txid",
-        1, editLog.getSyncTxId());
+      assertEquals(1, editLog.getSyncTxId(),
+          "logging edit without syncing should not affect txid");
 
       // Now ask to sync edit from B, which should sync both edits.
       doCallLogSync(threadB, editLog);
-      assertEquals("logSync from second thread should bump txid up to 3",
-        3, editLog.getSyncTxId());
+      assertEquals(3, editLog.getSyncTxId(),
+          "logSync from second thread should bump txid up to 3");
 
       // Now ask to sync edit from A, which was already batched in - thus
       // it should increment the batch count metric
       doCallLogSync(threadA, editLog);
-      assertEquals("logSync from first thread shouldn't change txid",
-        3, editLog.getSyncTxId());
+      assertEquals(3, editLog.getSyncTxId(),
+          "logSync from first thread shouldn't change txid");
 
       //Should have incremented the batch count exactly once
       assertCounter("TransactionsBatchedInSync", 1L, 
@@ -685,13 +681,13 @@
       // async log is doing batched syncs in background.  logSync just ensures
       // the edit is durable, so the txid may increase prior to sync
       if (!useAsyncEditLog) {
-        assertEquals("logging edit without syncing should do not affect txid",
-            1, editLog.getSyncTxId());
+        assertEquals(1, editLog.getSyncTxId(),
+            "logging edit without syncing should not affect txid");
       }
       // logSyncAll in Thread B
       doCallLogSyncAll(threadB, editLog);
-      assertEquals("logSyncAll should sync thread A's transaction",
-        2, editLog.getSyncTxId());
+      assertEquals(2, editLog.getSyncTxId(),
+          "logSyncAll should sync thread A's transaction");
 
       // Close edit log
       editLog.close();
@@ -748,10 +744,10 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
       fail("should not be able to start");
     } catch (IOException e) {
-      // expected
-      assertNotNull("Cause of exception should be ChecksumException", e.getCause());
-      assertEquals("Cause of exception should be ChecksumException",
-          ChecksumException.class, e.getCause().getClass());
+      // expected
+      assertNotNull(e.getCause(),
+          "Cause of exception should be ChecksumException");
+      assertEquals(ChecksumException.class, e.getCause().getClass(),
+          "Cause of exception should be ChecksumException");
     }
   }
 
@@ -826,11 +822,11 @@
         // We should see the file as in-progress
         File editsFile = new File(currentDir,
             NNStorage.getInProgressEditsFileName(1));
-        assertTrue("Edits file " + editsFile + " should exist", editsFile.exists());        
+        assertTrue(editsFile.exists(), "Edits file " + editsFile + " should exist");        
         
         File imageFile = FSImageTestUtil.findNewestImageFile(
             currentDir.getAbsolutePath());
-        assertNotNull("No image found in " + nameDir, imageFile);
+        assertNotNull(imageFile, "No image found in " + nameDir);
         assertEquals(NNStorage.getImageFileName(0), imageFile.getName());
         // Try to start a new cluster
         LOG.info("\n===========================================\n" +
@@ -858,7 +854,7 @@
         }
         imageFile = FSImageTestUtil.findNewestImageFile(
             currentDir.getAbsolutePath());
-        assertNotNull("No image found in " + nameDir, imageFile);
+        assertNotNull(imageFile, "No image found in " + nameDir);
         assertEquals(NNStorage.getImageFileName(expectedTxId),
                      imageFile.getName());
         
@@ -1200,7 +1196,7 @@
       String[] logSpecs = dirSpec.split("\\|");
       for (String logSpec : logSpecs) {
         Matcher m = Pattern.compile("\\[(\\d+),(\\d+)?\\]").matcher(logSpec);
-        assertTrue("bad spec: " + logSpec, m.matches());
+        assertTrue(m.matches(), "bad spec: " + logSpec);
         if (m.group(2) == null) {
           files.add(NNStorage.getInProgressEditsFileName(
               Long.parseLong(m.group(1))));
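
TestEditLog above still imports org.junit.runner.RunWith and org.junit.runners.Parameterized, which the Jupiter engine does not execute. If that runner is converted in a follow-up, the junit-jupiter-params equivalent for a boolean parameter such as useAsyncEditLog could look roughly like this (illustrative sketch only):

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.ValueSource;

    // Illustrative only: the parameter is injected per invocation instead of
    // through a constructor driven by @Parameters.
    class ParameterizedStyleExample {
      @ParameterizedTest
      @ValueSource(booleans = {false, true})
      void runsOncePerValue(boolean useAsyncEditLog) {
        // test body would branch on useAsyncEditLog here
      }
    }
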
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
index c6aad17..f9f4cf6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
@@ -39,9 +39,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -76,7 +76,7 @@
 
   public static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class);
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     // Stall the standby checkpointer in two ways
@@ -119,7 +119,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
index 297ddb5..34de842 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -44,8 +44,8 @@
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 
 public class TestEditLogFileInputStream {
@@ -147,13 +147,13 @@
     rwf.close();
 
     EditLogFileInputStream elis = new EditLogFileInputStream(editLog);
-    Assert.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
+    Assertions.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
         elis.getVersion(true));
-    Assert.assertEquals(1, elis.scanNextOp());
+    Assertions.assertEquals(1, elis.scanNextOp());
     LOG.debug("Read transaction 1 from " + editLog);
     try {
       elis.scanNextOp();
-      Assert.fail("Expected scanNextOp to fail when op checksum was corrupt.");
+      Assertions.fail("Expected scanNextOp to fail when op checksum was corrupt.");
     } catch (IOException e) {
       LOG.debug("Caught expected checksum error when reading corrupt " +
           "transaction 2", e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
index 9ea4548..10ecbda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -28,10 +28,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test the EditLogFileOutputStream
@@ -45,21 +45,21 @@
 
   private Configuration conf;
 
-  @BeforeClass
+  @BeforeAll
   public static void disableFsync() {
     // No need to fsync for the purposes of tests. This makes
     // the tests run much faster.
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   }
 
-  @Before
-  @After
+  @BeforeEach
+  @AfterEach
   public void deleteEditsFile() {
     if (TEST_EDITS.exists())
       TEST_EDITS.delete();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() {
     conf = new Configuration();
   }
@@ -137,7 +137,7 @@
       editLogStream.close();
     } catch (IOException ioe) {
       String msg = StringUtils.stringifyException(ioe);
-      assertTrue(msg, msg.contains("Trying to use aborted output stream"));
+      assertTrue(msg.contains("Trying to use aborted output stream"), msg);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
index dff2ec6..35ee369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.anyInt;
@@ -45,9 +43,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -84,7 +82,7 @@
    * Create the mini cluster for testing and sub in a custom runtime so that
    * edit log journal failures don't actually cause the JVM to exit.
    */
-  @Before
+  @BeforeEach
   public void setUpMiniCluster() throws IOException {
     setUpMiniCluster(getConf(), true);
   }
@@ -97,7 +95,7 @@
     fs = cluster.getFileSystem();
   }
   
-  @After
+  @AfterEach
   public void shutDownMiniCluster() throws IOException {
     if (fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 083d9e5..a423dc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_OWNER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.argThat;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
@@ -263,7 +261,7 @@
         assertEquals(previousLogTxId, nextLog);
         
         File expectedLog = NNStorage.getInProgressEditsFile(sd, previousLogTxId);
-        assertTrue("Expect " + expectedLog + " to exist", expectedLog.exists());
+        assertTrue(expectedLog.exists(), "Expect " + expectedLog + " to exist");
       }
     } finally {
       stopTransactionWorkers();
@@ -638,7 +636,7 @@
                 LOG.info("thread[" + ii +"] edits=" + i);
               }
             }
-            assertTrue("too many edits", done.get());
+            assertTrue(done.get(), "too many edits");
             return null;
           }
         });
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index 3b15c2d..957258d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -17,17 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 public class TestEditsDoubleBuffer {
   @Test
@@ -37,16 +34,16 @@
     assertTrue(buf.isFlushed());
     byte[] data = new byte[100];
     buf.writeRaw(data, 0, data.length);
-    assertEquals("Should count new data correctly",
-        data.length, buf.countBufferedBytes());
+    assertEquals(data.length, buf.countBufferedBytes(),
+        "Should count new data correctly");
 
-    assertTrue("Writing to current buffer should not affect flush state",
-        buf.isFlushed());
+    assertTrue(buf.isFlushed(),
+        "Writing to current buffer should not affect flush state");
 
     // Swap the buffers
     buf.setReadyToFlush();
-    assertEquals("Swapping buffers should still count buffered bytes",
-        data.length, buf.countBufferedBytes());
+    assertEquals(data.length, buf.countBufferedBytes(),
+        "Swapping buffers should still count buffered bytes");
     assertFalse(buf.isFlushed());
  
     // Flush to a stream
@@ -58,8 +55,8 @@
     
     // Write some more
     buf.writeRaw(data, 0, data.length);
-    assertEquals("Should count new data correctly",
-        data.length, buf.countBufferedBytes());
+    assertEquals(data.length, buf.countBufferedBytes(),
+        "Should count new data correctly");
     buf.setReadyToFlush();
     buf.flushTo(outBuf);
     
@@ -130,12 +127,12 @@
       EditsDoubleBuffer.LOG.info("Exception expected: ", ioe);
     }
     logs.stopCapturing();
-    // Make sure ops are dumped into log in human readable format.
-    Assert.assertTrue("expected " + op.toString() + " in the log",
-        logs.getOutput().contains(op.toString()));
-    Assert.assertTrue("expected " + op2.toString() + " in the log",
-        logs.getOutput().contains(op2.toString()));
-    Assert.assertTrue("expected " + op3.toString() + " in the log",
-        logs.getOutput().contains(op3.toString()));
+    // Make sure ops are dumped into log in human readable format.
+    Assertions.assertTrue(logs.getOutput().contains(op.toString()),
+        "expected " + op.toString() + " in the log");
+    Assertions.assertTrue(logs.getOutput().contains(op2.toString()),
+        "expected " + op2.toString() + " in the log");
+    Assertions.assertTrue(logs.getOutput().contains(op3.toString()),
+        "expected " + op3.toString() + " in the log");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
index 843e9e4..849173e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
@@ -25,9 +25,9 @@
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
@@ -36,11 +36,7 @@
 import java.util.List;
 import java.util.Set;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test that ErasureCodingPolicyManager correctly parses the set of enabled
@@ -70,8 +66,8 @@
         ErasureCodingPolicyManager.getInstance();
     manager.init(conf);
     manager.enablePolicy(value);
-    assertEquals("Incorrect number of enabled policies",
-        numEnabled, manager.getEnabledPolicies().length);
+    assertEquals(numEnabled, manager.getEnabledPolicies().length,
+        "Incorrect number of enabled policies");
   }
 
   @Test
@@ -132,8 +128,8 @@
     final String defaultPolicy = conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
         DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
-    assertNotEquals("The default policy and the next default policy " +
-        "should not be the same!", testPolicy, defaultPolicy);
+    assertNotEquals(testPolicy, defaultPolicy,
+        "The default policy and the next default policy " +
+        "should not be the same!");
 
     ErasureCodingPolicyManager manager =
         ErasureCodingPolicyManager.getInstance();
@@ -147,35 +143,35 @@
 
     ErasureCodingPolicyInfo[] getPoliciesResult = manager.getPolicies();
     boolean isEnabled = isPolicyEnabled(testPolicy, getPoliciesResult);
-    assertTrue("The new default policy should be " +
-        "in enabled state!", isEnabled);
+    assertTrue(isEnabled, "The new default policy should be " +
+        "in enabled state!");
     ErasureCodingPolicyInfo[] getPersistedPoliciesResult
         = manager.getPersistedPolicies();
     isEnabled = isPolicyEnabled(testPolicy, getPersistedPoliciesResult);
-    assertFalse("The new default policy should be " +
-        "in disabled state in the persisted list!", isEnabled);
+    assertFalse(isEnabled, "The new default policy should be " +
+        "in disabled state in the persisted list!");
 
     manager.disablePolicy(testPolicy);
     getPoliciesResult = manager.getPolicies();
     isEnabled = isPolicyEnabled(testPolicy, getPoliciesResult);
-    assertFalse("The new default policy should be " +
-        "in disabled state!", isEnabled);
+    assertFalse(isEnabled, "The new default policy should be " +
+        "in disabled state!");
     getPersistedPoliciesResult
         = manager.getPersistedPolicies();
     isEnabled = isPolicyEnabled(testPolicy, getPersistedPoliciesResult);
-    assertFalse("The new default policy should be " +
-        "in disabled state in the persisted list!", isEnabled);
+    assertFalse(isEnabled, "The new default policy should be " +
+        "in disabled state in the persisted list!");
 
     manager.enablePolicy(testPolicy);
     getPoliciesResult = manager.getPolicies();
     isEnabled = isPolicyEnabled(testPolicy, getPoliciesResult);
-    assertTrue("The new default policy should be " +
-        "in enabled state!", isEnabled);
+    assertTrue(isEnabled, "The new default policy should be " +
+        "in enabled state!");
     getPersistedPoliciesResult
         = manager.getPersistedPolicies();
     isEnabled = isPolicyEnabled(testPolicy, getPersistedPoliciesResult);
-    assertTrue("The new default policy should be " +
-        "in enabled state in the persisted list!", isEnabled);
+    assertTrue(isEnabled, "The new default policy should be " +
+        "in enabled state in the persisted list!");
 
     final String emptyPolicy = "";
     // Change the default policy to a empty
@@ -201,28 +197,28 @@
     // Check that returned values are unique
     Set<String> found = new HashSet<>();
     for (ErasureCodingPolicy p : manager.getEnabledPolicies()) {
-      Assert.assertFalse("Duplicate policy name found: " + p.getName(),
-          found.contains(p.getName()));
+      Assertions.assertFalse(found.contains(p.getName()),
+          "Duplicate policy name found: " + p.getName());
       found.add(p.getName());
     }
     // Check that the policies specified in conf are found
     for (ErasureCodingPolicy p: enabledPolicies) {
-      Assert.assertTrue("Did not find specified EC policy " + p.getName(),
-          found.contains(p.getName()));
+      Assertions.assertTrue(found.contains(p.getName()),
+          "Did not find specified EC policy " + p.getName());
     }
-    Assert.assertEquals(enabledPolicies.length, found.size()-1);
+    Assertions.assertEquals(enabledPolicies.length, found.size()-1);
     // Check that getEnabledPolicyByName only returns enabled policies
     for (ErasureCodingPolicy p: SystemErasureCodingPolicies.getPolicies()) {
       if (found.contains(p.getName())) {
-        // Enabled policy should be present
-        Assert.assertNotNull(
-            "getEnabledPolicyByName did not find enabled policy" + p.getName(),
-            manager.getEnabledPolicyByName(p.getName()));
+        // Enabled policy should be present
+        Assertions.assertNotNull(
+            manager.getEnabledPolicyByName(p.getName()),
+            "getEnabledPolicyByName did not find enabled policy" + p.getName());
       } else {
-        // Disabled policy should not be present
-        Assert.assertNull(
-            "getEnabledPolicyByName found disabled policy " + p.getName(),
-            manager.getEnabledPolicyByName(p.getName()));
+        // Disabled policy should not be present
+        Assertions.assertNull(
+            manager.getEnabledPolicyByName(p.getName()),
+            "getEnabledPolicyByName found disabled policy " + p.getName());
       }
     }
   }
@@ -250,7 +246,7 @@
   private void assertAllPoliciesAreDisabled(
       ErasureCodingPolicyInfo[] policies) {
     for (ErasureCodingPolicyInfo p : policies) {
-      assertTrue("Policy should be disabled", p.isDisabled());
+      assertTrue(p.isDisabled(), "Policy should be disabled");
     }
   }
 }
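
TestEnabledECPolicies keeps the JUnit 4 timeout rule (org.junit.Rule with org.junit.rules.Timeout), which the Jupiter engine ignores. The Jupiter counterpart is the org.junit.jupiter.api.Timeout annotation; an illustrative sketch:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // Illustrative only: a class-level @Timeout applies to every test method,
    // roughly like a JUnit 4 Timeout rule field.
    @Timeout(value = 60, unit = TimeUnit.SECONDS)
    class TimeoutStyleExample {
      @Test
      void finishesWithinTheLimit() {
        // test body
      }
    }
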
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
index fecbbfa..dd7a3c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEncryptionZoneManager.java
@@ -18,7 +18,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -30,8 +30,8 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test class for EncryptionZoneManager methods. Added tests for
@@ -48,7 +48,7 @@
   private PermissionStatus defaultPermission;
   private EncryptionZoneManager ezManager;
 
-  @Before
+  @BeforeEach
   public void setup() {
     this.mockedDir = mock(FSDirectory.class);
     this.mockedINodesInPath = mock(INodesInPath.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
index df7ab3d..cd920d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -29,8 +29,8 @@
 
 import java.io.FileNotFoundException;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.when;
 
 /**
@@ -93,45 +93,45 @@
 
   @Test
   public void testUnprotectedSetPermissions() throws Exception {
-    assertTrue("setPermissions return true for updated permissions",
-        unprotectedSetAttributes((short) 0777, (short) 0));
-    assertFalse("setPermissions should return false for same permissions",
-        unprotectedSetAttributes((short) 0777, (short) 0777));
+    assertTrue(unprotectedSetAttributes((short) 0777, (short) 0),
+        "setPermissions should return true for updated permissions");
+    assertFalse(unprotectedSetAttributes((short) 0777, (short) 0777),
+        "setPermissions should return false for same permissions");
   }
 
   @Test
   public void testUnprotectedSetOwner() throws Exception {
-    assertTrue("SetOwner should return true for a new user",
-        unprotectedSetAttributes((short) 0777, (short) 0777, "user1",
-            "user2", true));
-    assertFalse("SetOwner should return false for same user",
-        unprotectedSetAttributes((short) 0777, (short) 0777, "user1",
-            "user1", true));
+    assertTrue(
+        unprotectedSetAttributes((short) 0777, (short) 0777, "user1",
+            "user2", true), "SetOwner should return true for a new user");
+    assertFalse(
+        unprotectedSetAttributes((short) 0777, (short) 0777, "user1",
+            "user1", true), "SetOwner should return false for same user");
   }
 
   @Test
   public void testUnprotectedSetTimes() throws Exception {
-    // atime < access time + precision
-    assertFalse("SetTimes should not update access time "
-          + "because it's within the last precision interval",
-        unprotectedSetTimes(100, 0, 1000, -1, false));
+    // atime < access time + precision
+    assertFalse(unprotectedSetTimes(100, 0, 1000, -1, false),
+        "SetTimes should not update access time "
+            + "because it's within the last precision interval");
 
-    // atime = access time + precision
-    assertFalse("SetTimes should not update access time "
-          + "because it's within the last precision interval",
-        unprotectedSetTimes(1000, 0, 1000, -1, false));
+    // atime = access time + precision
+    assertFalse(unprotectedSetTimes(1000, 0, 1000, -1, false),
+        "SetTimes should not update access time "
+            + "because it's within the last precision interval");
 
-    // atime > access time + precision
-    assertTrue("SetTimes should update access time",
-        unprotectedSetTimes(1011, 10, 1000, -1, false));
+    // atime > access time + precision
+    assertTrue(unprotectedSetTimes(1011, 10, 1000, -1, false),
+        "SetTimes should update access time");
 
-    // atime < access time + precision, but force is set
-    assertTrue("SetTimes should update access time",
-        unprotectedSetTimes(100, 0, 1000, -1, true));
+    // atime < access time + precision, but force is set
+    assertTrue(unprotectedSetTimes(100, 0, 1000, -1, true),
+        "SetTimes should update access time");
 
-    // atime < access time + precision, but mtime is set
-    assertTrue("SetTimes should update access time",
-        unprotectedSetTimes(100, 0, 1000, 1, false));
+    // atime < access time + precision, but mtime is set
+    assertTrue(unprotectedSetTimes(100, 0, 1000, 1, false),
+        "SetTimes should update access time");
   }
 
   @Test(expected = FileNotFoundException.class)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java
index e3cfc01..ae36177 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirWriteFileOp.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyByte;
 import static org.mockito.ArgumentMatchers.anyInt;
@@ -38,7 +38,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.ValidateAddBlockResult;
 import org.apache.hadoop.net.Node;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 
 public class TestFSDirWriteFileOp {
@@ -71,9 +71,9 @@
 
     verifyNoMoreInteractions(bmMock);
 
-    assertNull(
-        "Source node was assigned a value. Expected 'null' value because "
-            + "chooseTarget was flagged to ignore source node locality",
-        nodeCaptor.getValue());
+    assertNull(
+        nodeCaptor.getValue(),
+        "Source node was assigned a value. Expected 'null' value because "
+            + "chooseTarget was flagged to ignore source node locality");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 556a26d..a45ab99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -42,17 +42,13 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
@@ -88,7 +84,7 @@
   private static final ImmutableList<XAttr> generatedXAttrs =
       ImmutableList.copyOf(generateXAttrs(numGeneratedXAttrs));
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
@@ -109,7 +105,7 @@
     hdfs.mkdirs(sub2);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -134,10 +130,10 @@
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
       if (!line.isEmpty() && !line.contains("snapshot")) {
-        assertTrue("line=" + line,
-            line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
-                || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM)
-        );
+        assertTrue(
+            line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
+                || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM),
+            "line=" + line);
         checkClassName(line);
       }
     }
@@ -234,12 +230,12 @@
    */
   private static void verifyXAttrsPresent(List<XAttr> newXAttrs,
       final int num) {
-    assertEquals("Unexpected number of XAttrs after multiset", num,
-        newXAttrs.size());
+    assertEquals(num, newXAttrs.size(),
+        "Unexpected number of XAttrs after multiset");
     for (int i=0; i<num; i++) {
       XAttr search = generatedXAttrs.get(i);
-      assertTrue("Did not find set XAttr " + search + " + after multiset",
-          newXAttrs.contains(search));
+      assertTrue(newXAttrs.contains(search),
+          "Did not find set XAttr " + search + " + after multiset");
     }
   }
 
@@ -307,8 +303,8 @@
       List<XAttr> newXAttrs = FSDirXAttrOp.filterINodeXAttrs(existingXAttrs,
                                                              toRemove,
                                                              removedXAttrs);
-      assertEquals("Unexpected number of removed XAttrs",
-          expectedNumToRemove, removedXAttrs.size());
+      assertEquals(expectedNumToRemove, removedXAttrs.size(),
+          "Unexpected number of removed XAttrs");
       verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
       existingXAttrs = newXAttrs;
     }
@@ -356,9 +352,9 @@
     List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs,
                                                         toAdd, EnumSet.of(
             XAttrSetFlag.CREATE));
-    assertEquals("Unexpected toAdd size", 2, toAdd.size());
+    assertEquals(2, toAdd.size(), "Unexpected toAdd size");
     for (XAttr x : toAdd) {
-      assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x));
+      assertTrue(newXAttrs.contains(x), "Did not find added XAttr " + x);
     }
     existingXAttrs = newXAttrs;
 
@@ -374,10 +370,10 @@
     }
     newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd,
                                             EnumSet.of(XAttrSetFlag.REPLACE));
-    assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size());
+    assertEquals(3, newXAttrs.size(), "Unexpected number of new XAttrs");
     for (int i=0; i<3; i++) {
-      assertArrayEquals("Unexpected XAttr value",
-          new byte[] {(byte)(i*2)}, newXAttrs.get(i).getValue());
+      assertArrayEquals(new byte[] {(byte) (i * 2)},
+          newXAttrs.get(i).getValue(), "Unexpected XAttr value");
     }
     existingXAttrs = newXAttrs;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 8008be7..a46159a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -67,8 +64,8 @@
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.FakeTimer;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
-import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -131,7 +128,7 @@
     cluster.shutdown();
 
     File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
-    assertTrue("Should exist: " + editFile, editFile.exists());
+    assertTrue(editFile.exists(), "Should exist: " + editFile);
 
     // Corrupt the edits file.
     long fileLen = editFile.length();
@@ -151,8 +148,8 @@
           .enableManagedDfsDirsRedundancy(false).format(false).build();
       fail("should not be able to start");
     } catch (IOException e) {
-      assertTrue("error message contains opcodes message",
-          e.getMessage().matches(bld.toString()));
+      assertTrue(e.getMessage().matches(bld.toString()),
+          "error message contains opcodes message");
     }
   }
   
@@ -331,7 +328,7 @@
       // disable that here.
       doNothing().when(spyLog).endCurrentLogSegment(true);
       spyLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
-      assertTrue("should exist: " + inProgressFile, inProgressFile.exists());
+      assertTrue(inProgressFile.exists(), "should exist: " + inProgressFile);
       
       for (int i = 0; i < numTx; i++) {
         long trueOffset = getNonTrailerLength(inProgressFile);
@@ -396,8 +393,8 @@
           Long.MAX_VALUE, true);
       long expectedEndTxId = (txId == (NUM_TXNS + 1)) ?
           NUM_TXNS : (NUM_TXNS + 1);
-      assertEquals("Failed when corrupting txn opcode at " + txOffset,
-          expectedEndTxId, validation.getEndTxId());
+      assertEquals(expectedEndTxId, validation.getEndTxId(),
+          "Failed when corrupting txn opcode at " + txOffset);
       assertTrue(!validation.hasCorruptHeader());
     }
 
@@ -414,8 +411,8 @@
           Long.MAX_VALUE, true);
       long expectedEndTxId = (txId == 0) ?
           HdfsServerConstants.INVALID_TXID : (txId - 1);
-      assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
-        "at " + txOffset, expectedEndTxId, validation.getEndTxId());
+      assertEquals(expectedEndTxId, validation.getEndTxId(),
+          "Failed when corrupting txid " + txId + " txn opcode at " + txOffset);
       assertTrue(!validation.hasCorruptHeader());
     }
   }
@@ -451,15 +448,15 @@
     //try all codes
     for(FSEditLogOpCodes c : FSEditLogOpCodes.values()) {
       final byte code = c.getOpCode();
-      assertEquals("c=" + c + ", code=" + code,
-          c, FSEditLogOpCodes.fromByte(code));
+      assertEquals(c, FSEditLogOpCodes.fromByte(code),
+          "c=" + c + ", code=" + code);
     }
 
     //try all byte values
     for(int b = 0; b < (1 << Byte.SIZE); b++) {
       final byte code = (byte)b;
-      assertEquals("b=" + b + ", code=" + code,
-          fromByte(code), FSEditLogOpCodes.fromByte(code));
+      assertEquals(fromByte(code), FSEditLogOpCodes.fromByte(code),
+          "b=" + b + ", code=" + code);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 185db69..52131f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -18,11 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.DataOutput;
@@ -81,11 +77,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Assume;
 import org.junit.Test;
-
-import static org.junit.Assert.assertArrayEquals;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
 
 public class TestFSImage {
 
@@ -112,7 +106,7 @@
 
   @Test
   public void testNativeCompression() throws IOException {
-    Assume.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
+    Assumptions.assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
     Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
     setCompressCodec(conf, "org.apache.hadoop.io.compress.Lz4Codec");
@@ -167,7 +161,7 @@
       assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
       // check lease manager
       Lease lease = fsn.leaseManager.getLease(file2Node);
-      Assert.assertNotNull(lease);
+      Assertions.assertNotNull(lease);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -464,8 +458,8 @@
     try {
       FileSystem fs = cluster.getFileSystem();
       Path testPath = new Path("/tmp/zeroBlockFile");
-      assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
-      assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
+      assertTrue(fs.exists(testPath), "File /tmp/zeroBlockFile doesn't exist ");
+      assertTrue(cluster.isNameNodeUp(0), "Name node didn't come up");
     } finally {
       cluster.shutdown();
       //Clean up
@@ -801,15 +795,15 @@
       assertTrue(fs.exists(replicaFile1));
       assertTrue(fs.exists(replicaFile2));
 
-      // check directories
-      assertEquals("Directory should have default EC policy.",
-          defaultEcPolicy, fs.getErasureCodingPolicy(ecDir));
-      assertEquals("Directory should hide replication EC policy.",
-          null, fs.getErasureCodingPolicy(replicaDir));
+      // check directories
+      assertEquals(defaultEcPolicy, fs.getErasureCodingPolicy(ecDir),
+          "Directory should have default EC policy.");
+      assertEquals(null, fs.getErasureCodingPolicy(replicaDir),
+          "Directory should hide replication EC policy.");
 
-      // check file1
-      assertEquals("File should not have EC policy.", null,
-          fs.getErasureCodingPolicy(replicaFile1));
+      // check file1
+      assertEquals(null, fs.getErasureCodingPolicy(replicaFile1),
+          "File should not have EC policy.");
       // check internals of file2
       INodeFile file2Node =
           fsn.dir.getINode4Write(replicaFile2.toString()).asFile();
@@ -818,13 +812,13 @@
       BlockInfo[] blks = file2Node.getBlocks();
       assertEquals(1, blks.length);
       assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
-      assertEquals("File should return expected replication factor.",
-          2, blks[0].getReplication());
-      assertEquals("File should not have EC policy.", null,
-          fs.getErasureCodingPolicy(replicaFile2));
+      assertEquals(2, blks[0].getReplication(),
+          "File should return expected replication factor.");
+      assertEquals(null, fs.getErasureCodingPolicy(replicaFile2),
+          "File should not have EC policy.");
       // check lease manager
       Lease lease = fsn.leaseManager.getLease(file2Node);
-      Assert.assertNotNull(lease);
+      Assertions.assertNotNull(lease);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -854,9 +848,9 @@
       cluster.restartNameNodes();
       cluster.waitActive();
 
-      assertEquals("Erasure coding policy number should match",
-          SystemErasureCodingPolicies.getPolicies().size(),
-          ErasureCodingPolicyManager.getInstance().getPolicies().length);
+      assertEquals(SystemErasureCodingPolicies.getPolicies().size(),
+          ErasureCodingPolicyManager.getInstance().getPolicies().length,
+          "Erasure coding policy number should match");
 
       // Add new erasure coding policy
       ECSchema newSchema = new ECSchema("rs", 5, 4);
@@ -877,17 +871,17 @@
       cluster.restartNameNodes();
       cluster.waitActive();
 
-      assertEquals("Erasure coding policy number should match",
-          SystemErasureCodingPolicies.getPolicies().size() + 1,
-          ErasureCodingPolicyManager.getInstance().getPolicies().length);
+      assertEquals(SystemErasureCodingPolicies.getPolicies().size() + 1,
+          ErasureCodingPolicyManager.getInstance().getPolicies().length,
+          "Erasure coding policy number should match");
       ErasureCodingPolicy ecPolicy =
           ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId());
-      assertEquals("Newly added erasure coding policy is not found",
-          newPolicy, ecPolicy);
-      assertEquals(
-          "Newly added erasure coding policy should be of disabled state",
-          ErasureCodingPolicyState.DISABLED,
-          DFSTestUtil.getECPolicyState(ecPolicy));
+      assertEquals(newPolicy, ecPolicy,
+          "Newly added erasure coding policy is not found");
+      assertEquals(
+          ErasureCodingPolicyState.DISABLED,
+          DFSTestUtil.getECPolicyState(ecPolicy),
+          "Newly added erasure coding policy should be of disabled state");
 
       // Test enable/disable/remove user customized erasure coding policy
       testChangeErasureCodingPolicyState(cluster, blockSize, newPolicy, false);
@@ -926,13 +920,13 @@
     cluster.waitActive();
     ErasureCodingPolicy ecPolicy =
         ErasureCodingPolicyManager.getInstance().getByID(targetPolicy.getId());
-    assertEquals("The erasure coding policy is not found",
-        targetPolicy, ecPolicy);
-    assertEquals("The erasure coding policy should be of enabled state",
-        ErasureCodingPolicyState.ENABLED,
-        DFSTestUtil.getECPolicyState(ecPolicy));
-    assertTrue("Policy should be in disabled state in FSImage!",
-        isPolicyEnabledInFsImage(targetPolicy));
+    assertEquals(targetPolicy, ecPolicy,
+        "The erasure coding policy is not found");
+    assertEquals(ErasureCodingPolicyState.ENABLED,
+        DFSTestUtil.getECPolicyState(ecPolicy),
+        "The erasure coding policy should be of enabled state");
+    assertTrue(isPolicyEnabledInFsImage(targetPolicy),
+        "Policy should be in enabled state in FSImage!");
 
     // Read file regardless of the erasure coding policy state
     DFSTestUtil.readFileAsBytes(fs, filePath);
@@ -948,19 +942,19 @@
     cluster.waitActive();
     ecPolicy =
         ErasureCodingPolicyManager.getInstance().getByID(targetPolicy.getId());
-    assertEquals("The erasure coding policy is not found",
-        targetPolicy, ecPolicy);
+    assertEquals(targetPolicy, ecPolicy,
+        "The erasure coding policy is not found");
     ErasureCodingPolicyState ecPolicyState =
         DFSTestUtil.getECPolicyState(ecPolicy);
     if (isDefault) {
-      assertEquals("The erasure coding policy should be of " +
-              "enabled state", ErasureCodingPolicyState.ENABLED, ecPolicyState);
+      assertEquals(ErasureCodingPolicyState.ENABLED, ecPolicyState,
+          "The erasure coding policy should be of enabled state");
     } else {
-      assertEquals("The erasure coding policy should be of " +
-          "disabled state", ErasureCodingPolicyState.DISABLED, ecPolicyState);
+      assertEquals(ErasureCodingPolicyState.DISABLED, ecPolicyState,
+          "The erasure coding policy should be of disabled state");
     }
-    assertFalse("Policy should be in disabled state in FSImage!",
-        isPolicyEnabledInFsImage(targetPolicy));
+    assertFalse(isPolicyEnabledInFsImage(targetPolicy),
+        "Policy should be in disabled state in FSImage!");
 
     // Read file regardless of the erasure coding policy state
     DFSTestUtil.readFileAsBytes(fs, filePath);
@@ -969,9 +963,9 @@
     try {
       fs.removeErasureCodingPolicy(ecPolicy.getName());
     } catch (RemoteException e) {
-      // built-in policy cannot been removed
-      assertTrue("Built-in policy cannot be removed",
-          ecPolicy.isSystemPolicy());
+      // built-in policy cannot be removed
+      assertTrue(ecPolicy.isSystemPolicy(),
+          "Built-in policy cannot be removed");
       assertExceptionContains("System erasure coding policy", e);
       return;
     }
@@ -986,11 +980,11 @@
     cluster.waitActive();
     ecPolicy = ErasureCodingPolicyManager.getInstance().getByID(
         targetPolicy.getId());
-    assertEquals("The erasure coding policy saved into and loaded from " +
-        "fsImage is bad", targetPolicy, ecPolicy);
-    assertEquals("The erasure coding policy should be of removed state",
-        ErasureCodingPolicyState.REMOVED,
-        DFSTestUtil.getECPolicyState(ecPolicy));
+    assertEquals(targetPolicy, ecPolicy,
+        "The erasure coding policy saved into and loaded from fsImage is bad");
+    assertEquals(ErasureCodingPolicyState.REMOVED,
+        DFSTestUtil.getECPolicyState(ecPolicy),
+        "The erasure coding policy should be of removed state");
     // Read file regardless of the erasure coding policy state
     DFSTestUtil.readFileAsBytes(fs, filePath);
     fs.delete(dirPath, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
index bb03b30..661c56c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
@@ -20,9 +20,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -30,7 +28,7 @@
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestFSImageStorageInspector {
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
index 55ff190..11ae79a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
@@ -34,16 +34,16 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.util.Lists;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestFSImageWithAcl {
   private static Configuration conf;
   private static MiniDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws IOException {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -51,7 +51,7 @@
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -73,7 +73,7 @@
     AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
     AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(
         new AclEntry[0]);
-    Assert.assertArrayEquals(new AclEntry[] {
+    Assertions.assertArrayEquals(new AclEntry[] {
         aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
         aclEntry(ACCESS, GROUP, READ) }, returned);
 
@@ -90,12 +90,12 @@
 
     s = cluster.getNamesystem().getAclStatus(p.toString());
     returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(new AclEntry[] { }, returned);
+    Assertions.assertArrayEquals(new AclEntry[] { }, returned);
 
     fs.modifyAclEntries(p, Lists.newArrayList(e));
     s = cluster.getNamesystem().getAclStatus(p.toString());
     returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(new AclEntry[] {
+    Assertions.assertArrayEquals(new AclEntry[] {
         aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
         aclEntry(ACCESS, GROUP, READ) }, returned);
   }
@@ -140,20 +140,20 @@
 
     AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(fileExpected, fileReturned);
+    Assertions.assertArrayEquals(fileExpected, fileReturned);
     AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(subdirExpected, subdirReturned);
+    Assertions.assertArrayEquals(subdirExpected, subdirReturned);
     assertPermission(fs, subdirPath, permExpected);
 
     restart(fs, persistNamespace);
 
     fileReturned = fs.getAclStatus(filePath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(fileExpected, fileReturned);
+    Assertions.assertArrayEquals(fileExpected, fileReturned);
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(subdirExpected, subdirReturned);
+    Assertions.assertArrayEquals(subdirExpected, subdirReturned);
     assertPermission(fs, subdirPath, permExpected);
 
     aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
@@ -161,40 +161,40 @@
 
     fileReturned = fs.getAclStatus(filePath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(fileExpected, fileReturned);
+    Assertions.assertArrayEquals(fileExpected, fileReturned);
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(subdirExpected, subdirReturned);
+    Assertions.assertArrayEquals(subdirExpected, subdirReturned);
     assertPermission(fs, subdirPath, permExpected);
 
     restart(fs, persistNamespace);
 
     fileReturned = fs.getAclStatus(filePath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(fileExpected, fileReturned);
+    Assertions.assertArrayEquals(fileExpected, fileReturned);
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(subdirExpected, subdirReturned);
+    Assertions.assertArrayEquals(subdirExpected, subdirReturned);
     assertPermission(fs, subdirPath, permExpected);
 
     fs.removeAcl(dirPath);
 
     fileReturned = fs.getAclStatus(filePath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(fileExpected, fileReturned);
+    Assertions.assertArrayEquals(fileExpected, fileReturned);
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(subdirExpected, subdirReturned);
+    Assertions.assertArrayEquals(subdirExpected, subdirReturned);
     assertPermission(fs, subdirPath, permExpected);
 
     restart(fs, persistNamespace);
 
     fileReturned = fs.getAclStatus(filePath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(fileExpected, fileReturned);
+    Assertions.assertArrayEquals(fileExpected, fileReturned);
     subdirReturned = fs.getAclStatus(subdirPath).getEntries()
       .toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(subdirExpected, subdirReturned);
+    Assertions.assertArrayEquals(subdirExpected, subdirReturned);
     assertPermission(fs, subdirPath, permExpected);
   }
 
@@ -221,7 +221,7 @@
     AclStatus s = cluster.getNamesystem().getAclStatus(rootdir.toString());
     AclEntry[] returned =
         Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(
+    Assertions.assertArrayEquals(
         new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE),
             aclEntry(ACCESS, GROUP, "bar", READ),
             aclEntry(ACCESS, GROUP, "foo", ALL) }, returned);
@@ -231,7 +231,7 @@
 
     s = cluster.getNamesystem().getAclStatus(rootdir.toString());
     returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(
+    Assertions.assertArrayEquals(
         new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE),
             aclEntry(ACCESS, GROUP, "bar", READ),
             aclEntry(ACCESS, GROUP, "foo", ALL) }, returned);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index de527f0..67db1da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -38,10 +38,10 @@
 import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.io.File;
@@ -52,8 +52,8 @@
 import java.util.List;
 import java.util.Random;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test FSImage save/load when Snapshot is supported
@@ -78,7 +78,7 @@
   FSNamesystem fsn;
   DistributedFileSystem hdfs;
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES)
@@ -88,7 +88,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -199,8 +199,8 @@
     
     INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString())
         .asDirectory();
-    assertTrue("The children list of root should be empty", 
-        rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
+    assertTrue(rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty(),
+        "The children list of root should be empty");
     // one snapshot on root: s1
     DiffList<DirectoryDiff> diffList = rootNode.getDiffs().asList();
     assertEquals(1, diffList.size());
@@ -322,15 +322,15 @@
     long numSnapshotAfter = fsn.getNumSnapshots();
     SnapshottableDirectoryStatus[] dirAfter = hdfs.getSnapshottableDirListing();
     
-    Assert.assertEquals(numSdirBefore, numSdirAfter);
-    Assert.assertEquals(numSnapshotBefore, numSnapshotAfter);
-    Assert.assertEquals(dirBefore.length, dirAfter.length);
+    Assertions.assertEquals(numSdirBefore, numSdirAfter);
+    Assertions.assertEquals(numSnapshotBefore, numSnapshotAfter);
+    Assertions.assertEquals(dirBefore.length, dirAfter.length);
     List<String> pathListBefore = new ArrayList<String>();
     for (SnapshottableDirectoryStatus sBefore : dirBefore) {
       pathListBefore.add(sBefore.getFullPath().toString());
     }
     for (SnapshottableDirectoryStatus sAfter : dirAfter) {
-      Assert.assertTrue(pathListBefore.contains(sAfter.getFullPath().toString()));
+      Assertions.assertTrue(pathListBefore.contains(sAfter.getFullPath().toString()));
     }
   }
   
@@ -610,7 +610,7 @@
     output.println(b);
 
     final String s = NamespacePrintVisitor.print2Sting(fsn);
-    Assert.assertEquals(b, s);
+    Assertions.assertEquals(b, s);
     return b;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
index 4cef321..75929c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
@@ -29,10 +29,10 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * 1) save xattrs, restart NN, assert xattrs reloaded from edit log, 
@@ -52,7 +52,7 @@
   private static final String name3 = "user.a3";
   private static final byte[] value3 = {};
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws IOException {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
@@ -60,7 +60,7 @@
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -79,20 +79,20 @@
     restart(fs, persistNamespace);
     
     Map<String, byte[]> xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 3);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
-    Assert.assertArrayEquals(value3, xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 3);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertArrayEquals(value3, xattrs.get(name3));
     
     fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
     
     restart(fs, persistNamespace);
     
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 3);
-    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
-    Assert.assertArrayEquals(value3, xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 3);
+    Assertions.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertArrayEquals(value3, xattrs.get(name3));
 
     fs.removeXAttr(path, name1);
     fs.removeXAttr(path, name2);
@@ -100,7 +100,7 @@
 
     restart(fs, persistNamespace);
     xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 0);
+    Assertions.assertEquals(xattrs.size(), 0);
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index f2f4244..a7f6781 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -23,7 +23,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.hamcrest.CoreMatchers.either;
 import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.junit.Assert.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -47,15 +48,15 @@
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 import org.mockito.Mockito;
 
 import java.util.List;
 
 public class TestFSNamesystem {
 
-  @After
+  @AfterEach
   public void cleanUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
   }
@@ -108,16 +109,16 @@
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
 
     fsn.leaveSafeMode(false);
-    assertTrue("After leaving safemode FSNamesystem.isInStartupSafeMode still "
-      + "returned true", !fsn.isInStartupSafeMode());
-    assertTrue("After leaving safemode FSNamesystem.isInSafeMode still returned"
-      + " true", !fsn.isInSafeMode());
+      assertTrue(!fsn.isInStartupSafeMode(), "After leaving safemode FSNamesystem.isInStartupSafeMode still "
+              + "returned true");
+      assertTrue(!fsn.isInSafeMode(), "After leaving safemode FSNamesystem.isInSafeMode still returned"
+              + " true");
 
     fsn.enterSafeMode(true);
-    assertTrue("After entering safemode due to low resources FSNamesystem."
-      + "isInStartupSafeMode still returned true", !fsn.isInStartupSafeMode());
-    assertTrue("After entering safemode due to low resources FSNamesystem."
-      + "isInSafeMode still returned false",  fsn.isInSafeMode());
+      assertTrue(!fsn.isInStartupSafeMode(), "After entering safemode due to low resources FSNamesystem."
+              + "isInStartupSafeMode still returned true");
+      assertTrue(  fsn.isInSafeMode(), "After entering safemode due to low resources FSNamesystem."
+              + "isInSafeMode still returned false");
   }
 
   @Test
@@ -144,17 +145,17 @@
     NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
 
     fsn.enterSafeMode(false);
-    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
-    assertTrue("Replication queues were being populated during very first "
-        + "safemode", !bm.isPopulatingReplQueues());
+      assertTrue(fsn.isInSafeMode(), "FSNamesystem didn't enter safemode");
+      assertTrue(!bm.isPopulatingReplQueues(), "Replication queues were being populated during very first "
+              + "safemode");
     fsn.leaveSafeMode(false);
-    assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
-    assertTrue("Replication queues weren't being populated even after leaving "
-      + "safemode", bm.isPopulatingReplQueues());
+    assertTrue(!fsn.isInSafeMode(), "FSNamesystem didn't leave safemode");
+    assertTrue(bm.isPopulatingReplQueues(),
+        "Replication queues weren't being populated even after leaving safemode");
     fsn.enterSafeMode(false);
-    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
-    assertTrue("Replication queues weren't being populated after entering "
-      + "safemode 2nd time", bm.isPopulatingReplQueues());
+    assertTrue(fsn.isInSafeMode(), "FSNamesystem didn't enter safemode");
+    assertTrue(bm.isPopulatingReplQueues(),
+        "Replication queues weren't being populated after entering safemode 2nd time");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index f0ae181..737e58e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -41,7 +41,7 @@
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FSLOCK_FAIR_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
index ef1ed9b..3dad205 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
@@ -29,9 +29,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.LoggerFactory;
 
 import java.security.PrivilegedExceptionAction;
@@ -40,7 +40,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestFSNamesystemLockReport {
 
@@ -60,7 +60,7 @@
   private UserGroupInformation userGroupInfo;
   private GenericTestUtils.LogCapturer logs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.set(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, "hadoop");
@@ -82,7 +82,7 @@
         org.slf4j.event.Level.INFO);
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws Exception {
     if (fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
index 5a7da05..6bbf2d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 import java.lang.management.ManagementFactory;
 import java.util.HashSet;
@@ -155,8 +155,8 @@
       MBeanClient client = new MBeanClient();
       client.start();
       client.join(20000);
-      assertTrue("JMX calls are blocked when FSNamesystem's writerlock" +
-          "is owned by another thread", client.succeeded);
+        assertTrue(client.succeeded, "JMX calls are blocked when FSNamesystem's writerlock" +
+                "is owned by another thread");
       client.interrupt();
     } finally {
       if (fsn != null && fsn.hasWriteLock()) {
@@ -185,8 +185,8 @@
         MBeanClient client = new MBeanClient();
         client.start();
         client.join(20000);
-        assertTrue("JMX calls are blocked when FSEditLog" +
-            " is synchronized by another thread", client.succeeded);
+        assertTrue(client.succeeded, "JMX calls are blocked when FSEditLog"
+            + " is synchronized by another thread");
         client.interrupt();
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index 6312e92..7f919a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -32,8 +32,8 @@
 import static org.apache.hadoop.fs.permission.FsAction.WRITE;
 import static org.apache.hadoop.fs.permission.FsAction.WRITE_EXECUTE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -52,10 +52,11 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+
 /**
  * Unit tests covering FSPermissionChecker.  All tests in this suite have been
  * cross-validated against Linux setfacl/getfacl to check for consistency of the
@@ -76,7 +77,7 @@
   private FSDirectory dir;
   private INodeDirectory inodeRoot;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     Configuration conf = new Configuration();
     FSNamesystem fsn = mock(FSNamesystem.class);
@@ -418,11 +419,11 @@
       fail("expected AccessControlException for user + " + user + ", path = " +
         path + ", access = " + access);
     } catch (AccessControlException e) {
-      assertTrue("Permission denied messages must carry the username",
-              e.getMessage().contains(user.getUserName().toString()));
-      assertTrue("Permission denied messages must carry the path parent",
-              e.getMessage().contains(
-                  new Path(path).getParent().toUri().getPath()));
+      assertTrue(e.getMessage().contains(user.getUserName().toString()),
+          "Permission denied messages must carry the username");
+      assertTrue(
+          e.getMessage().contains(new Path(path).getParent().toUri().getPath()),
+          "Permission denied messages must carry the path parent");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
index f5a112c..8fd9012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
@@ -18,8 +18,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -44,10 +44,10 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 
 
 public class TestFavoredNodesEndToEnd {
@@ -64,7 +64,7 @@
   private static DistributedFileSystem dfs;
   private static ArrayList<DataNode> datanodes;
   
-  @BeforeClass
+  @BeforeAll
   public static void setUpBeforeClass() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
@@ -74,7 +74,7 @@
     datanodes = cluster.getDataNodes();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownAfterClass() throws Exception {
     if (cluster != null) { 
       cluster.shutdown();
@@ -150,15 +150,15 @@
     d.stopDecommission();
 
     BlockLocation[] locations = getBlockLocations(p);
-    Assert.assertEquals(replication, locations[0].getNames().length);;
+    Assertions.assertEquals(replication, locations[0].getNames().length);
     //also make sure that the datanode[0] is not in the list of hosts
     for (int i = 0; i < replication; i++) {
       final String loc = locations[0].getNames()[i];
       int j = 0;
       for(; j < hosts.length && !loc.equals(hosts[j]); j++);
-      Assert.assertTrue("j=" + j, j > 0);
-      Assert.assertTrue("loc=" + loc + " not in host list "
-          + Arrays.asList(hosts) + ", j=" + j, j < hosts.length);
+      Assertions.assertTrue(j > 0, "j=" + j);
+      Assertions.assertTrue(j < hosts.length, "loc=" + loc
+          + " not in host list " + Arrays.asList(hosts) + ", j=" + j);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java
index f9a6889..9fcf7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java
@@ -28,14 +28,14 @@
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Tests for ACL operation through FileContext APIs
  */
 public class TestFileContextAcl extends FSAclBaseTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new Configuration();
     startCluster();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
index da09298..0a91d32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.junit.BeforeClass;
 
 /**
  * Tests of XAttr operations using FileContext APIs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
index 0155d87..06b78d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
@@ -20,9 +20,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
 import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.FilenameFilter;
@@ -44,9 +42,9 @@
 import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.NativeCodeLoader;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
@@ -67,7 +65,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @Before
+  @BeforeEach
   public void setUp() {
     conf = new Configuration();
   }
@@ -387,8 +385,8 @@
     assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
     assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
     assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
-    assertEquals("Asking for a newer log than exists should return empty list",
-        "", getLogsAsString(fjm, 9999));
+    assertEquals("", getLogsAsString(fjm, 9999),
+        "Asking for a newer log than exists should return empty list");
   }
 
   /**
@@ -446,7 +444,7 @@
     EditLogInputStream elis = getJournalInputStream(jm, 5, true);
     try {
       FSEditLogOp op = elis.readOp();
-      assertEquals("read unexpected op", op.getTransactionId(), 5);
+      assertEquals(op.getTransactionId(), 5, "read unexpected op");
     } finally {
       IOUtils.cleanupWithLogger(LOG, elis);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
index 636dc60..09a65c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -85,8 +85,8 @@
       // check that / exists
       //
       Path path = new Path("/");
-      assertTrue("/ should be a directory", 
-                 fs.getFileStatus(path).isDirectory());
+      assertTrue(fs.getFileStatus(path).isDirectory(),
+          "/ should be a directory");
       currentNodes = 1;          // root inode
 
       // verify that we can create the specified number of files. We leave
@@ -108,7 +108,7 @@
       } catch (IOException e) {
         hitException = true;
       }
-      assertTrue("Was able to exceed file limit", hitException);
+        assertTrue(hitException, "Was able to exceed file limit");
 
       // delete one file
       Path file0 = new Path("/filestatus0");
@@ -148,7 +148,7 @@
       } catch (IOException e) {
         hitException = true;
       }
-      assertTrue("Was able to exceed dir limit", hitException);
+        assertTrue(hitException, "Was able to exceed dir limit");
 
     } finally {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 57f5ea3..f178b35 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -20,15 +20,9 @@
 
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
@@ -69,9 +63,9 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestFileTruncate {
   static {
@@ -96,7 +90,7 @@
 
  private Path parent;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -113,7 +107,7 @@
     parent = new Path("/test");
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if(fs != null) {
       fs.close();
@@ -146,16 +140,16 @@
         LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
             + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
 
-        assertEquals("File must be closed for zero truncate"
-            + " or truncating at the block boundary",
-            isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
+        assertEquals(isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0,
+            "File must be closed for zero truncate"
+                + " or truncating at the block boundary");
         if (!isReady) {
           checkBlockRecovery(p);
         }
 
         ContentSummary cs = fs.getContentSummary(parent);
-        assertEquals("Bad disk space usage",
-            cs.getSpaceConsumed(), newLength * REPLICATION);
+        assertEquals(cs.getSpaceConsumed(), newLength * REPLICATION,
+            "Bad disk space usage");
         // validate the file content
         checkFullFile(p, newLength, contents);
       }
@@ -177,10 +171,10 @@
       final int newLength = ThreadLocalRandom.current().nextInt(n);
       final boolean isReady = fs.truncate(p, newLength);
       LOG.info("newLength=" + newLength + ", isReady=" + isReady);
-      assertEquals("File must be closed for truncating at the block boundary",
-          isReady, newLength % BLOCK_SIZE == 0);
-      assertEquals("Truncate is not idempotent",
-          isReady, fs.truncate(p, newLength));
+      assertEquals(isReady, newLength % BLOCK_SIZE == 0,
+          "File must be closed for truncating at the block boundary");
+      assertEquals(isReady, fs.truncate(p, newLength),
+          "Truncate is not idempotent");
       if (!isReady) {
         checkBlockRecovery(p);
       }
@@ -210,8 +204,8 @@
         " newLength must not be multiple of BLOCK_SIZE";
     final boolean isReady = fs.truncate(p, newLength);
     LOG.info("newLength=" + newLength + ", isReady=" + isReady);
-    assertEquals("File must be closed for truncating at the block boundary",
-        isReady, newLength % BLOCK_SIZE == 0);
+    assertEquals(isReady, newLength % BLOCK_SIZE == 0,
+        "File must be closed for truncating at the block boundary");
     fs.deleteSnapshot(dir, snapshot);
     if (!isReady) {
       checkBlockRecovery(p);
@@ -381,7 +375,7 @@
     // Truncate to block boundary
     int newLength = length[0] + BLOCK_SIZE / 2;
     boolean isReady = fs.truncate(src, newLength);
-    assertTrue("Recovery is not expected.", isReady);
+    assertTrue(isReady, "Recovery is not expected.");
     assertFileLength(snapshotFiles[2], length[2]);
     assertFileLength(snapshotFiles[1], length[1]);
     assertFileLength(snapshotFiles[0], length[0]);
@@ -394,7 +388,7 @@
     // Truncate full block again
     newLength = length[0] - BLOCK_SIZE / 2;
     isReady = fs.truncate(src, newLength);
-    assertTrue("Recovery is not expected.", isReady);
+    assertTrue(isReady, "Recovery is not expected.");
     assertFileLength(snapshotFiles[2], length[2]);
     assertFileLength(snapshotFiles[1], length[1]);
     assertFileLength(snapshotFiles[0], length[0]);
@@ -406,7 +400,7 @@
     // Truncate half of the last block
     newLength -= BLOCK_SIZE / 2;
     isReady = fs.truncate(src, newLength);
-    assertFalse("Recovery is expected.", isReady);
+    assertFalse(isReady, "Recovery is expected.");
     checkBlockRecovery(src);
     assertFileLength(snapshotFiles[2], length[2]);
     assertFileLength(snapshotFiles[1], length[1]);
@@ -425,13 +419,13 @@
     // Delete file. Should still be able to read snapshots
     int numINodes = fsDir.getInodeMapSize();
     isReady = fs.delete(src, false);
-    assertTrue("Delete failed.", isReady);
+    assertTrue(isReady, "Delete failed.");
     assertFileLength(snapshotFiles[3], length[3]);
     assertFileLength(snapshotFiles[2], length[2]);
     assertFileLength(snapshotFiles[1], length[1]);
     assertFileLength(snapshotFiles[0], length[0]);
-    assertEquals("Number of INodes should not change",
-        numINodes, fsDir.getInodeMapSize());
+    assertEquals(numINodes, fsDir.getInodeMapSize(),
+        "Number of INodes should not change");
 
     fs.deleteSnapshot(parent, ss[3]);
 
@@ -449,8 +443,8 @@
     assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
     assertBlockExists(firstBlk);
     assertBlockExists(lastBlk);
-    assertEquals("Number of INodes should not change",
-        numINodes, fsDir.getInodeMapSize());
+    assertEquals(numINodes, fsDir.getInodeMapSize(),
+        "Number of INodes should not change");
 
     // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
     contentSummary = fs.getContentSummary(parent);
@@ -468,8 +462,8 @@
       // Diskspace consumed should be 48 bytes * 3. [SS:1,2,3,4]
       assertThat(contentSummary.getSpaceConsumed(), is(48L));
     }
-    assertEquals("Number of INodes should not change",
-        numINodes, fsDir .getInodeMapSize());
+    assertEquals(numINodes, fsDir.getInodeMapSize(),
+        "Number of INodes should not change");
 
     fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
     assertBlockNotPresent(firstBlk);
@@ -478,8 +472,8 @@
     // Diskspace consumed should be 0 bytes * 3. []
     contentSummary = fs.getContentSummary(parent);
     assertThat(contentSummary.getSpaceConsumed(), is(0L));
-    assertNotEquals("Number of INodes should change",
-        numINodes, fsDir.getInodeMapSize());
+    assertNotEquals(numINodes, fsDir.getInodeMapSize(),
+        "Number of INodes should change");
   }
 
   /**
@@ -521,7 +515,7 @@
     snapshotFiles[0] = new Path(snapshotDir, truncateFile);
     length[1] = 2 * BLOCK_SIZE;
     boolean isReady = fs.truncate(src, 2 * BLOCK_SIZE);
-    assertTrue("Recovery is not expected.", isReady);
+    assertTrue(isReady, "Recovery is not expected.");
 
     // Diskspace consumed should be 12 bytes * 3. [blk 1,2 SS:3]
     contentSummary = fs.getContentSummary(parent);
@@ -532,7 +526,7 @@
     // Create another snapshot with truncate
     length[2] = BLOCK_SIZE + BLOCK_SIZE / 2;
     isReady = fs.truncate(src, BLOCK_SIZE + BLOCK_SIZE / 2);
-    assertFalse("Recovery is expected.", isReady);
+    assertFalse(isReady, "Recovery is expected.");
     checkBlockRecovery(src);
     snapshotDir = fs.createSnapshot(parent, ss[2]);
     snapshotFiles[2] = new Path(snapshotDir, truncateFile);
@@ -922,7 +916,7 @@
       for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
         Thread.sleep(SLEEP);
       }
-      assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
+      assertFalse(cluster.isDataNodeUp(), "All DataNodes should be down.");
       LocatedBlocks blocks = getLocatedBlocks(p);
       assertTrue(blocks.isUnderConstruction());
     } finally {
@@ -1210,14 +1204,14 @@
     final int newLength = fileLength/3;
     boolean isReady = fs.truncate(link, newLength);
 
-    assertTrue("Recovery is not expected.", isReady);
+    assertTrue(isReady, "Recovery is not expected.");
 
     FileStatus fileStatus = fs.getFileStatus(file);
     assertThat(fileStatus.getLen(), is((long) newLength));
 
     ContentSummary cs = fs.getContentSummary(parent);
-    assertEquals("Bad disk space usage",
-        cs.getSpaceConsumed(), newLength * REPLICATION);
+    assertEquals(cs.getSpaceConsumed(), newLength * REPLICATION,
+        "Bad disk space usage");
     // validate the file content
     checkFullFile(file, newLength, contents);
 
@@ -1236,7 +1230,7 @@
     //start rolling upgrade
     dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
     int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
-    assertEquals("could not prepare for rolling upgrade", 0, status);
+    assertEquals(0, status, "could not prepare for rolling upgrade");
     dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 
     Path dir = new Path("/testTruncateWithRollingUpgrade");
@@ -1246,22 +1240,22 @@
     ThreadLocalRandom.current().nextBytes(data);
     writeContents(data, data.length, p);
 
-    assertEquals("block num should 1", 1,
-        cluster.getNamesystem().getFSDirectory().getBlockManager()
-            .getTotalBlocks());
+    assertEquals(1,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks(), "block num should 1");
 
     final boolean isReady = fs.truncate(p, 2);
-    assertFalse("should be copy-on-truncate", isReady);
-    assertEquals("block num should 2", 2,
-        cluster.getNamesystem().getFSDirectory().getBlockManager()
-            .getTotalBlocks());
+    assertFalse(isReady, "should be copy-on-truncate");
+    assertEquals(2,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks(), "block num should 2");
     fs.delete(p, true);
 
-    assertEquals("block num should 0", 0,
-        cluster.getNamesystem().getFSDirectory().getBlockManager()
-            .getTotalBlocks());
+    assertEquals(0,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks(), "block num should 0");
     status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
-    assertEquals("could not finalize rolling upgrade", 0, status);
+    assertEquals(0, status, "could not finalize rolling upgrade");
   }
 
   static void writeContents(byte[] contents, int fileLength, Path p)
@@ -1308,18 +1302,18 @@
   }
 
   static void assertBlockExists(Block blk) {
-    assertNotNull("BlocksMap does not contain block: " + blk,
-        cluster.getNamesystem().getStoredBlock(blk));
+    assertNotNull(cluster.getNamesystem().getStoredBlock(blk),
+        "BlocksMap does not contain block: " + blk);
   }
 
   static void assertBlockNotPresent(Block blk) {
-    assertNull("BlocksMap should not contain block: " + blk,
-        cluster.getNamesystem().getStoredBlock(blk));
+    assertNull(cluster.getNamesystem().getStoredBlock(blk),
+        "BlocksMap should not contain block: " + blk);
   }
 
   static void assertFileLength(Path file, long length) throws IOException {
     byte[] data = DFSTestUtil.readFileBuffer(fs, file);
-    assertEquals("Wrong data size in snapshot.", length, data.length);
+    assertEquals(length, data.length, "Wrong data size in snapshot.");
   }
 
   static void checkFullFile(Path p, int newLength, byte[] contents)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java
index af30f1a..b9a3ed7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsImageValidation.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -51,7 +51,7 @@
 
     try {
       final int errorCount = FsImageValidation.newInstance().run();
-      Assert.assertEquals("Error Count: " + errorCount, 0, errorCount);
+      Assertions.assertEquals(0, errorCount, "Error Count: " + errorCount);
     } catch (HadoopIllegalArgumentException e) {
       LOG.warn("The environment variable {} is not set: {}",
           FsImageValidation.FS_IMAGE, e);
@@ -63,7 +63,7 @@
     final Configuration conf = new Configuration();
     final String nsId = "cluster0";
     FsImageValidation.setHaConf(nsId, conf);
-    Assert.assertTrue(HAUtil.isHAEnabled(conf, nsId));
+    Assertions.assertTrue(HAUtil.isHAEnabled(conf, nsId));
   }
 
   @Test
@@ -81,14 +81,14 @@
     LOG.info("{} ?= {}", n, s);
     for(int i = s.length(); i > 0;) {
       for(int j = 0; j < 3 && i > 0; j++) {
-        Assert.assertTrue(Character.isDigit(s.charAt(--i)));
+        Assertions.assertTrue(Character.isDigit(s.charAt(--i)));
       }
       if (i > 0) {
-        Assert.assertEquals(',', s.charAt(--i));
+        Assertions.assertEquals(',', s.charAt(--i));
       }
     }
 
-    Assert.assertNotEquals(0, s.length()%4);
-    Assert.assertEquals(n, Long.parseLong(s.replaceAll(",", "")));
+    Assertions.assertNotEquals(0, s.length()%4);
+    Assertions.assertEquals(n, Long.parseLong(s.replaceAll(",", "")));
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index dd9ca22..7965121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
@@ -39,8 +39,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestFsLimits {
   static Configuration conf;
@@ -59,7 +59,7 @@
     return fsn;
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 049a785..d99882f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -20,12 +20,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED;
 import static org.apache.hadoop.hdfs.MiniDFSCluster.HDFS_MINIDFS_BASEDIR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.mock;
@@ -124,10 +119,10 @@
 import org.apache.log4j.Logger;
 import org.apache.log4j.PatternLayout;
 import org.apache.log4j.RollingFileAppender;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.LoggerFactory;
 
 /**
@@ -184,14 +179,14 @@
   private MiniDFSCluster cluster = null;
   private Configuration conf = null;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED,
         false);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     shutdownCluster();
   }
@@ -277,14 +272,14 @@
       for (int i = 0; i < 2; i++) {
         line = reader.readLine();
         assertNotNull(line);
-        assertTrue("Expected getfileinfo event not found in audit log",
-            GET_FILE_INFO_PATTERN.matcher(line).matches());
+        assertTrue(GET_FILE_INFO_PATTERN.matcher(line).matches(),
+            "Expected getfileinfo event not found in audit log");
       }
       line = reader.readLine();
       assertNotNull(line);
-      assertTrue("Expected fsck event not found in audit log", FSCK_PATTERN
-          .matcher(line).matches());
-      assertNull("Unexpected event in audit log", reader.readLine());
+      assertTrue(FSCK_PATTERN.matcher(line).matches(),
+          "Expected fsck event not found in audit log");
+      assertNull(reader.readLine(), "Unexpected event in audit log");
     } finally {
       // Close the reader and remove the appender to release the audit log file
       // handle after verifying the content of the file.
@@ -1149,8 +1144,8 @@
         }
         for (File metadataFile : metadataFiles) {
           File blockFile = Block.metaToBlockFile(metadataFile);
-          assertTrue("Cannot remove file.", blockFile.delete());
-          assertTrue("Cannot remove file.", metadataFile.delete());
+          assertTrue(blockFile.delete(), "Cannot remove file.");
+          assertTrue(metadataFile.delete(), "Cannot remove file.");
         }
       }
     }
@@ -1230,10 +1225,10 @@
     File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
     cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numReplicas).build();
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     // Create a file that will be intentionally under-replicated
     final String pathString = new String("/testfile");
@@ -1292,10 +1287,10 @@
     File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
     cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     // Create a file that will be intentionally under-replicated
     final String pathString = new String("/testfile");
@@ -1467,10 +1462,10 @@
     cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
@@ -1522,10 +1517,10 @@
     cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
@@ -1610,10 +1605,10 @@
         .racks(racks)
         .build();
 
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
@@ -1729,10 +1724,10 @@
     cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
@@ -1842,10 +1837,10 @@
     cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
@@ -1927,10 +1922,10 @@
         .racks(racks)
         .build();
 
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
 
     DFSTestUtil util = new DFSTestUtil.Builder().
         setName(getClass().getSimpleName()).setNumFiles(1).build();
@@ -2133,8 +2128,8 @@
         }
         for (File metadataFile : metadataFiles) {
           File blockFile = Block.metaToBlockFile(metadataFile);
-          assertTrue("Cannot remove file.", blockFile.delete());
-          assertTrue("Cannot remove file.", metadataFile.delete());
+          assertTrue(blockFile.delete(), "Cannot remove file.");
+          assertTrue(metadataFile.delete(), "Cannot remove file.");
         }
       }
     }
@@ -2233,7 +2228,7 @@
             }
           }
           if (numCorrupt == null) {
-            Assert.fail("Cannot find corrupt blocks count in fsck output.");
+            Assertions.fail("Cannot find corrupt blocks count in fsck output.");
           }
           if (Integer.parseInt(numCorrupt) == ctf.getTotalMissingBlocks()) {
             assertTrue(str.contains(NamenodeFsck.CORRUPT_STATUS));
@@ -2241,7 +2236,7 @@
           }
         } catch (Exception e) {
           LOG.error("Exception caught", e);
-          Assert.fail("Caught unexpected exception.");
+          Assertions.fail("Caught unexpected exception.");
         }
         return false;
       }
@@ -2265,7 +2260,7 @@
     for (LocatedFileStatus lfs: retVal) {
       totalLength += lfs.getLen();
     }
-    Assert.assertTrue("Nothing is moved to lost+found!", totalLength > 0);
+    Assertions.assertTrue(totalLength > 0, "Nothing is moved to lost+found!");
     util.cleanup(dfs, srcDir);
   }
 
@@ -2375,7 +2370,7 @@
       File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
       File blkFile = MiniDFSCluster.getBlockFile(storageDir,
           blks[i].getBlock());
-      Assert.assertTrue("Block file does not exist", blkFile.exists());
+      Assertions.assertTrue(blkFile.exists(), "Block file does not exist");
 
       FileOutputStream out = new FileOutputStream(blkFile);
       out.write("corruption".getBytes());
@@ -2465,7 +2460,7 @@
           }
         } catch (Exception e) {
           LOG.error("Exception caught", e);
-          Assert.fail("Caught unexpected exception.");
+          Assertions.fail("Caught unexpected exception.");
         }
         return false;
       }
@@ -2486,7 +2481,7 @@
       Path fileName = new Path(filePath);
       DFSTestUtil.createFile(fs, fileName, 512, (short) 2, 0);
       DFSTestUtil.waitReplication(fs, fileName, (short) 2);
-      Assert.assertTrue("File not created", fs.exists(fileName));
+      Assertions.assertTrue(fs.exists(fileName), "File not created");
       cluster.getDataNodes().get(1).shutdown();
       DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
       cluster.restartDataNode(1, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
index 327c51c..2d32179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
@@ -36,8 +36,8 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -121,7 +121,7 @@
         LOG.info("urls[" + i + "]=" + urls[i]);
         final String result = TestFsck.runFsck(conf, 0, false, urls[i]);
         LOG.info("result=" + result);
-        Assert.assertTrue(result.contains("Status: HEALTHY"));
+        Assertions.assertTrue(result.contains("Status: HEALTHY"));
       }
 
       // Test viewfs
@@ -138,7 +138,7 @@
         LOG.info("vurls[" + i + "]=" + vurls[i]);
         final String result = TestFsck.runFsck(conf, 0, false, vurls[i]);
         LOG.info("result=" + result);
-        Assert.assertTrue(result.contains("Status: HEALTHY"));
+        Assertions.assertTrue(result.contains("Status: HEALTHY"));
       }
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
index edcf9e1..690f1a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
index 03aa440..3261c74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
@@ -28,16 +28,14 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.security.PrivilegedExceptionAction;
 
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * This class tests get content summary with permission settings.
@@ -50,7 +48,7 @@
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -61,7 +59,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
index c78f4e0..fd954c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -32,7 +32,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authorize.AccessControlList;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 public class TestGetImageServlet {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index 1608a84..00ec872 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -18,12 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 
@@ -46,10 +41,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestHDFSConcat {
   public static final Logger LOG =
@@ -71,18 +66,18 @@
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
   }
   
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
-    assertNotNull("Failed Cluster Creation", cluster);
+    assertNotNull(cluster, "Failed Cluster Creation");
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
-    assertNotNull("Failed to get FileSystem", dfs);
+    assertNotNull(dfs, "Failed to get FileSystem");
     nn = cluster.getNameNodeRpc();
-    assertNotNull("Failed to get NameNode", nn);
+    assertNotNull(nn, "Failed to get NameNode");
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if(dfs != null) {
       dfs.close();
@@ -209,7 +204,7 @@
     // 3. removal of the src file
     for(Path p: files) {
       fStatus = nn.getFileInfo(p.toUri().getPath());
-      assertNull("File " + p + " still exists", fStatus); // file shouldn't exist
+      assertNull(fStatus, "File " + p + " still exists"); // file shouldn't exist
       // try to create fie with the same name
       DFSTestUtil.createFile(dfs, p, fileLen, REPL_FACTOR, 1); 
     }
@@ -290,7 +285,7 @@
       if(mismatch)
         break;
     }
-    assertFalse("File content of concatenated file is different", mismatch);
+    assertFalse(mismatch, "File content of concatenated file is different");
   }
 
   // test case when final block is not of a full length
@@ -360,7 +355,7 @@
     
     // 3. removal of the src file
     fStatus = nn.getFileInfo(name2);
-    assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist
+    assertNull(fStatus, "File " + name2 + " still exists"); // file shouldn't exist
   
     // 4. content
     checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2});
@@ -445,14 +440,14 @@
     }
 
     ContentSummary summary = dfs.getContentSummary(foo);
-    Assert.assertEquals(11, summary.getFileCount());
-    Assert.assertEquals(blockSize * REPL_FACTOR +
+    Assertions.assertEquals(11, summary.getFileCount());
+    Assertions.assertEquals(blockSize * REPL_FACTOR +
             blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed());
 
     dfs.concat(target, srcs);
     summary = dfs.getContentSummary(foo);
-    Assert.assertEquals(1, summary.getFileCount());
-    Assert.assertEquals(
+    Assertions.assertEquals(1, summary.getFileCount());
+    Assertions.assertEquals(
         blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum,
         summary.getSpaceConsumed());
   }
@@ -476,22 +471,22 @@
     }
 
     ContentSummary summary = dfs.getContentSummary(bar);
-    Assert.assertEquals(11, summary.getFileCount());
-    Assert.assertEquals(dsQuota, summary.getSpaceConsumed());
+    Assertions.assertEquals(11, summary.getFileCount());
+    Assertions.assertEquals(dsQuota, summary.getSpaceConsumed());
 
     try {
       dfs.concat(target, srcs);
       fail("QuotaExceededException expected");
     } catch (RemoteException e) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           e.unwrapRemoteException() instanceof QuotaExceededException);
     }
 
     dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
     dfs.concat(target, srcs);
     summary = dfs.getContentSummary(bar);
-    Assert.assertEquals(1, summary.getFileCount());
-    Assert.assertEquals(blockSize * repl * (srcNum + 1),
+    Assertions.assertEquals(1, summary.getFileCount());
+    Assertions.assertEquals(blockSize * repl * (srcNum + 1),
         summary.getSpaceConsumed());
   }
 
@@ -519,7 +514,7 @@
     DFSTestUtil.createFile(dfs, src, blockSize, REPL_FACTOR, 1);
     try {
       dfs.concat(trg, new Path[] { src });
-      Assert.fail("Must throw Exception!");
+      Assertions.fail("Must throw Exception!");
     } catch (IOException e) {
       String errMsg = "Concat operation doesn't support "
           + FSDirectory.DOT_RESERVED_STRING + " relative path : " + trg;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index e86413d..e92a3b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.lang.management.ManagementFactory;
 import java.util.Arrays;
@@ -37,7 +37,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -135,8 +135,8 @@
       ObjectName mxbeanName = new ObjectName(
               "Hadoop:service=NameNode,name=NameNodeInfo");
       String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
-      assertTrue("Live nodes should contain the decommissioned node",
-              nodes.contains("Decommissioned"));
+      assertTrue(nodes.contains("Decommissioned"),
+          "Live nodes should contain the decommissioned node");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 776a198..7bbaea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -41,10 +41,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -216,7 +216,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     CALLED.clear();
     Configuration conf = new HdfsConfiguration();
@@ -230,7 +230,7 @@
     miniDFS = new MiniDFSCluster.Builder(conf).build();
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     CALLED.clear();
     if (miniDFS != null) {
@@ -238,12 +238,12 @@
       miniDFS = null;
     }
     runPermissionCheck = false;
-    Assert.assertTrue(CALLED.contains("stop"));
+    Assertions.assertTrue(CALLED.contains("stop"));
   }
 
   @Test
   public void testDelegationToProvider() throws Exception {
-    Assert.assertTrue(CALLED.contains("start"));
+    Assertions.assertTrue(CALLED.contains("start"));
     FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
     final Path tmpPath = new Path("/tmp");
     final Path fooPath = new Path("/tmp/foo");
@@ -258,20 +258,20 @@
         FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
         CALLED.clear();
         fs.mkdirs(fooPath);
-        Assert.assertTrue(CALLED.contains("getAttributes"));
-        Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
-        Assert.assertTrue(CALLED.contains("checkPermission|WRITE|null|null"));
+        Assertions.assertTrue(CALLED.contains("getAttributes"));
+        Assertions.assertTrue(CALLED.contains("checkPermission|null|null|null"));
+        Assertions.assertTrue(CALLED.contains("checkPermission|WRITE|null|null"));
 
         CALLED.clear();
         fs.listStatus(fooPath);
-        Assert.assertTrue(CALLED.contains("getAttributes"));
-        Assert.assertTrue(
+        Assertions.assertTrue(CALLED.contains("getAttributes"));
+        Assertions.assertTrue(
             CALLED.contains("checkPermission|null|null|READ_EXECUTE"));
 
         CALLED.clear();
         fs.getAclStatus(fooPath);
-        Assert.assertTrue(CALLED.contains("getAttributes"));
-        Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
+        Assertions.assertTrue(CALLED.contains("getAttributes"));
+        Assertions.assertTrue(CALLED.contains("checkPermission|null|null|null"));
         return null;
       }
     });
@@ -284,9 +284,9 @@
     }
     public void doAssert(boolean x) {
       if (bypass) {
-        Assert.assertFalse(x);
+        Assertions.assertFalse(x);
       } else {
-        Assert.assertTrue(x);
+        Assertions.assertTrue(x);
       }
     }
   }
@@ -295,7 +295,7 @@
       final short expectedPermission, final boolean bypass) throws Exception {
     final AssertHelper asserter = new AssertHelper(bypass);
 
-    Assert.assertTrue(CALLED.contains("start"));
+    Assertions.assertTrue(CALLED.contains("start"));
 
     FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
     final Path userPath = new Path("/user");
@@ -316,13 +316,13 @@
         @Override
         public Void run() throws Exception {
           FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-          Assert.assertEquals(expectedPermission,
+          Assertions.assertEquals(expectedPermission,
               fs.getFileStatus(authzChild).getPermission().toShort());
           asserter.doAssert(CALLED.contains("getAttributes"));
           asserter.doAssert(CALLED.contains("checkPermission|null|null|null"));
 
           CALLED.clear();
-          Assert.assertEquals(expectedPermission,
+          Assertions.assertEquals(expectedPermission,
               fs.listStatus(userPath)[0].getPermission().toShort());
           asserter.doAssert(CALLED.contains("getAttributes"));
           asserter.doAssert(
@@ -362,28 +362,28 @@
     Path userDir = new Path("/user/" + ugi.getShortUserName());
     fs.mkdirs(userDir);
     status = fs.getFileStatus(userDir);
-    Assert.assertEquals(ugi.getShortUserName(), status.getOwner());
-    Assert.assertEquals("supergroup", status.getGroup());
-    Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
+    Assertions.assertEquals(ugi.getShortUserName(), status.getOwner());
+    Assertions.assertEquals("supergroup", status.getGroup());
+    Assertions.assertEquals(new FsPermission((short) 0755), status.getPermission());
 
     Path authzDir = new Path("/user/authz");
     fs.mkdirs(authzDir);
     status = fs.getFileStatus(authzDir);
-    Assert.assertEquals("foo", status.getOwner());
-    Assert.assertEquals("bar", status.getGroup());
-    Assert.assertEquals(new FsPermission((short) 0770), status.getPermission());
+    Assertions.assertEquals("foo", status.getOwner());
+    Assertions.assertEquals("bar", status.getGroup());
+    Assertions.assertEquals(new FsPermission((short) 0770), status.getPermission());
 
     AclStatus aclStatus = fs.getAclStatus(authzDir);
-    Assert.assertEquals(1, aclStatus.getEntries().size());
-    Assert.assertEquals(AclEntryType.GROUP,
+    Assertions.assertEquals(1, aclStatus.getEntries().size());
+    Assertions.assertEquals(AclEntryType.GROUP,
         aclStatus.getEntries().get(0).getType());
-    Assert.assertEquals("xxx",
+    Assertions.assertEquals("xxx",
         aclStatus.getEntries().get(0).getName());
-    Assert.assertEquals(FsAction.ALL,
+    Assertions.assertEquals(FsAction.ALL,
         aclStatus.getEntries().get(0).getPermission());
     Map<String, byte[]> xAttrs = fs.getXAttrs(authzDir);
-    Assert.assertTrue(xAttrs.containsKey("user.test"));
-    Assert.assertEquals(2, xAttrs.get("user.test").length);
+    Assertions.assertTrue(xAttrs.containsKey("user.test"));
+    Assertions.assertEquals(2, xAttrs.get("user.test").length);
   }
 
   /**
@@ -419,7 +419,7 @@
       Path aclChildDir = new Path(aclDir, "subdir");
       fs.mkdirs(aclChildDir);
       AclStatus aclStatus = fs.getAclStatus(aclDir);
-      Assert.assertEquals(0, aclStatus.getEntries().size());
+      Assertions.assertEquals(0, aclStatus.getEntries().size());
       return null;
     });
   }
@@ -445,12 +445,12 @@
       @Override
       public Void run() throws Exception {
         FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-        Assert.assertEquals(PROVIDER_PERMISSION,
+        Assertions.assertEquals(PROVIDER_PERMISSION,
             fs.getFileStatus(authzChild).getPermission().toShort());
 
-        Assert.assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
-        Assert.assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
-        Assert.assertEquals(PROVIDER_PERMISSION,
+        Assertions.assertEquals("foo", fs.getAclStatus(authzChild).getOwner());
+        Assertions.assertEquals("bar", fs.getAclStatus(authzChild).getGroup());
+        Assertions.assertEquals(PROVIDER_PERMISSION,
             fs.getAclStatus(authzChild).getPermission().toShort());
         return null;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index b32f8fe..0711a06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -20,11 +20,7 @@
 
 import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
 import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -73,8 +69,8 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -189,8 +185,8 @@
         null, perm, 0L, 0L, null, replication, null /*ec policy*/,
         preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
 
-    Assert.assertTrue(!inodeFile.isStriped());
-    Assert.assertEquals(replication.shortValue(),
+    Assertions.assertTrue(!inodeFile.isStriped());
+    Assertions.assertEquals(replication.shortValue(),
         inodeFile.getFileReplication());
   }
 
@@ -203,8 +199,8 @@
     replication = 3;
     preferredBlockSize = 128*1024*1024;
     INodeFile inf = createINodeFile(replication, preferredBlockSize);
-    assertEquals("True has to be returned in this case", replication,
-                 inf.getFileReplication());
+    assertEquals(replication, inf.getFileReplication(),
+        "True has to be returned in this case");
   }
 
   /**
@@ -229,8 +225,8 @@
     replication = 3;
     preferredBlockSize = 128*1024*1024;
     INodeFile inf = createINodeFile(replication, preferredBlockSize);
-   assertEquals("True has to be returned in this case", preferredBlockSize,
-        inf.getPreferredBlockSize());
+    assertEquals(preferredBlockSize, inf.getPreferredBlockSize(),
+        "True has to be returned in this case");
  }
 
   @Test
@@ -238,8 +234,8 @@
     replication = 3;
     preferredBlockSize = BLKSIZE_MAXVALUE;
     INodeFile inf = createINodeFile(replication, preferredBlockSize);
-    assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
-                 inf.getPreferredBlockSize());
+    assertEquals(BLKSIZE_MAXVALUE, inf.getPreferredBlockSize(),
+        "True has to be returned in this case");
   }
 
   /**
@@ -358,12 +354,12 @@
   @Test
   public void testConcatBlocks() {
     INodeFile origFile = createINodeFiles(1, "origfile")[0];
-    assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);
+    assertEquals(origFile.numBlocks(), 1L, "Number of blocks didn't match");
 
     INodeFile[] appendFiles = createINodeFiles(4, "appendfile");
     BlockManager bm = Mockito.mock(BlockManager.class);
     origFile.concatBlocks(appendFiles, bm);
-    assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
+    assertEquals(origFile.numBlocks(), 5L, "Number of blocks didn't match");
   }
   
   /** 
@@ -1261,7 +1257,7 @@
       ContentSummary cs = dfs.getContentSummary(new Path(dir));
       QuotaUsage qu = dfs.getQuotaUsage(new Path(dir));
 
-      Assert.assertEquals(cs.getFileCount() + cs.getDirectoryCount(),
+      Assertions.assertEquals(cs.getFileCount() + cs.getDirectoryCount(),
           qu.getFileAndDirectoryCount());
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
index df36322..5ed71bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
@@ -30,8 +30,8 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 
 /**
@@ -77,11 +77,11 @@
   }
   
   private int getBlockCount() {
-    Assert.assertNotNull("Null cluster", mc);
-    Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
+    Assertions.assertNotNull(mc, "Null cluster");
+    Assertions.assertNotNull(mc.getNameNode(), "No Namenode in cluster");
     FSNamesystem namesystem = mc.getNamesystem();
-    Assert.assertNotNull("Null Namesystem in cluster", namesystem);
-    Assert.assertNotNull("Null Namesystem.blockmanager", namesystem.getBlockManager());
+    Assertions.assertNotNull(namesystem, "Null Namesystem in cluster");
+    Assertions.assertNotNull(namesystem.getBlockManager(), "Null Namesystem.blockmanager");
     return (int) namesystem.getBlocksTotal();
   }
 
@@ -146,7 +146,7 @@
     LOG.info("Deletion took " + (end - start) + "msecs");
     LOG.info("createOperations " + createOps);
     LOG.info("lockOperations " + lockOps);
-    Assert.assertTrue(lockOps + createOps > 0);
+    Assertions.assertTrue(lockOps + createOps > 0);
     threads[0].rethrow();
     threads[1].rethrow();
   }
@@ -215,9 +215,9 @@
     mc = new MiniDFSCluster.Builder(CONF).build();
     try {
       mc.waitActive();
-      Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
+      Assertions.assertNotNull(mc.getNameNode(), "No Namenode in cluster");
       createFiles();
-      Assert.assertEquals(TOTAL_BLOCKS, getBlockCount());
+      Assertions.assertEquals(TOTAL_BLOCKS, getBlockCount());
       runThreads();
     } finally {
       mc.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 54b401d..8cdf0a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -18,9 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -52,7 +51,6 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.junit.Assert.assertThat;
 import static org.mockito.Mockito.*;
 
 public class TestLeaseManager {
@@ -175,8 +173,8 @@
       // Check whether the lease manager has the lease
       dir = cluster.getNamesystem().getFSDirectory();
       file = dir.getINode(path).asFile();
-      assertTrue("Lease should exist.",
-          cluster.getNamesystem().leaseManager.getLease(file) != null);
+      assertTrue(cluster.getNamesystem().leaseManager.getLease(file) != null,
+          "Lease should exist.");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index a197c00..7d96575 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -93,18 +93,18 @@
       final NameNode namenode = cluster.getNameNode();
       Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
         getNamesystem().listCorruptFileBlocks("/", null);
-      assertEquals("Namenode has " + badFiles.size()
-          + " corrupt files. Expecting None.", 0, badFiles.size());
+      assertEquals(0, badFiles.size(), "Namenode has " + badFiles.size()
+          + " corrupt files. Expecting None.");
       assertCorruptFilesCount(cluster, badFiles.size());
 
       // Now deliberately corrupt one block
       String bpid = cluster.getNamesystem().getBlockPoolId();
       File storageDir = cluster.getInstanceStorageDir(0, 1);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
+      assertTrue(data_dir.exists(), "data directory does not exist");
       List<File> metaFiles = MiniDFSCluster.getAllBlockFiles(data_dir);
-      assertTrue("Data directory does not contain any blocks or there was an "
-          + "IO error", metaFiles != null && !metaFiles.isEmpty());
+      assertTrue(metaFiles != null && !metaFiles.isEmpty(),
+          "Data directory does not contain any blocks or there was an IO error");
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
@@ -122,15 +122,15 @@
       } catch (BlockMissingException e) {
         System.out.println("Received BlockMissingException as expected.");
       } catch (IOException e) {
-        assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
-            " but received IOException " + e, false);
+        assertTrue(false, "Corrupted replicas not handled properly. " +
+            "Expecting BlockMissingException but received IOException " + e);
       }
 
       // fetch bad file list from namenode. There should be one file.
       badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
       LOG.info("Namenode has bad files. " + badFiles.size());
-      assertEquals("Namenode has " + badFiles.size() + " bad files. " +
-          "Expecting 1.", 1, badFiles.size());
+      assertEquals(1, badFiles.size(),
+          "Namenode has " + badFiles.size() + " bad files. Expecting 1.");
       assertCorruptFilesCount(cluster, badFiles.size());
       util.cleanup(fs, "/srcdat10");
     } finally {
@@ -176,18 +176,18 @@
       // fetch bad file list from namenode. There should be none.
       Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = 
         cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
-      assertEquals("Namenode has " + badFiles.size()
-          + " corrupt files. Expecting None.", 0, badFiles.size());
+      assertEquals(0, badFiles.size(), "Namenode has " + badFiles.size()
+          + " corrupt files. Expecting None.");
       assertCorruptFilesCount(cluster, badFiles.size());
 
       // Now deliberately corrupt one block
       File storageDir = cluster.getInstanceStorageDir(0, 0);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, 
           cluster.getNamesystem().getBlockPoolId());
-      assertTrue("data directory does not exist", data_dir.exists());
+      assertTrue(data_dir.exists(), "data directory does not exist");
       List<File> metaFiles = MiniDFSCluster.getAllBlockFiles(data_dir);
-      assertTrue("Data directory does not contain any blocks or there was an "
-          + "IO error", metaFiles != null && !metaFiles.isEmpty());
+      assertTrue(metaFiles != null && !metaFiles.isEmpty(),
+          "Data directory does not contain any blocks or there was an IO error");
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
@@ -205,17 +205,17 @@
       } catch (BlockMissingException e) {
         System.out.println("Received BlockMissingException as expected.");
       } catch (IOException e) {
-        assertTrue("Corrupted replicas not handled properly. " +
-                   "Expecting BlockMissingException " +
-                   " but received IOException " + e, false);
+          assertTrue(false, "Corrupted replicas not handled properly. " +
+                  "Expecting BlockMissingException " +
+                  " but received IOException " + e);
       }
 
       // fetch bad file list from namenode. There should be one file.
       badFiles = cluster.getNameNode().getNamesystem().
         listCorruptFileBlocks("/", null);
       LOG.info("Namenode has bad files. " + badFiles.size());
-      assertEquals("Namenode has " + badFiles.size() + " bad files. " +
-          "Expecting 1.", 1, badFiles.size());
+      assertEquals(1, badFiles.size(),
+          "Namenode has " + badFiles.size() + " bad files. Expecting 1.");
       assertCorruptFilesCount(cluster, badFiles.size());
  
       // restart namenode
@@ -238,22 +238,22 @@
       } catch (BlockMissingException e) {
         System.out.println("Received BlockMissingException as expected.");
       } catch (IOException e) {
-        assertTrue("Corrupted replicas not handled properly. " +
-                   "Expecting BlockMissingException " +
-                   " but received IOException " + e, false);
+          assertTrue(false, "Corrupted replicas not handled properly. " +
+                  "Expecting BlockMissingException " +
+                  " but received IOException " + e);
       }
 
       // fetch bad file list from namenode. There should be one file.
       badFiles = cluster.getNameNode().getNamesystem().
         listCorruptFileBlocks("/", null);
       LOG.info("Namenode has bad files. " + badFiles.size());
-      assertEquals("Namenode has " + badFiles.size() + " bad files. " +
-          "Expecting 1.", 1, badFiles.size());
+      assertEquals(1, badFiles.size(),
+          "Namenode has " + badFiles.size() + " bad files. Expecting 1.");
       assertCorruptFilesCount(cluster, badFiles.size());
 
-      // check that we are still in safe mode
-      assertTrue("Namenode is not in safe mode", 
-                 cluster.getNameNode().isInSafeMode());
+      // check that we are still in safe mode
+      assertTrue(cluster.getNameNode().isInSafeMode(),
+          "Namenode is not in safe mode");
 
       // now leave safe mode so that we can clean up
       cluster.getNameNodeRpc().setSafeMode(
@@ -310,9 +310,9 @@
           for (File metadataFile : metadataFiles) {
             File blockFile = Block.metaToBlockFile(metadataFile);
             LOG.info("Deliberately removing file " + blockFile.getName());
-            assertTrue("Cannot remove file.", blockFile.delete());
+            assertTrue(blockFile.delete(), "Cannot remove file.");
             LOG.info("Deliberately removing file " + metadataFile.getName());
-            assertTrue("Cannot remove file.", metadataFile.delete());
+            assertTrue(metadataFile.delete(), "Cannot remove file.");
             // break;
           }
         }
@@ -423,9 +423,9 @@
         for (File metadataFile : metadataFiles) {
           File blockFile = Block.metaToBlockFile(metadataFile);
           LOG.info("Deliberately removing file " + blockFile.getName());
-          assertTrue("Cannot remove file.", blockFile.delete());
+          assertTrue(blockFile.delete(), "Cannot remove file.");
           LOG.info("Deliberately removing file " + metadataFile.getName());
-          assertTrue("Cannot remove file.", metadataFile.delete());
+          assertTrue(metadataFile.delete(), "Cannot remove file.");
           // break;
         }
       }
@@ -483,9 +483,9 @@
       final NameNode namenode = cluster.getNameNode();
       Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
         getNamesystem().listCorruptFileBlocks("/srcdat2", null);
-      assertEquals(
-          "Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
-          0, badFiles.size());
+      assertEquals(
+          0, badFiles.size(),
+          "Namenode has " + badFiles.size() + " corrupt files. Expecting none.");
       assertCorruptFilesCount(cluster, badFiles.size());
 
       // Now deliberately blocks from all files
@@ -501,8 +501,8 @@
             continue;
           for (File metadataFile : metadataFiles) {
             File blockFile = Block.metaToBlockFile(metadataFile);
-            assertTrue("Cannot remove file.", blockFile.delete());
-            assertTrue("Cannot remove file.", metadataFile.delete());
+            assertTrue(blockFile.delete(), "Cannot remove file.");
+            assertTrue(metadataFile.delete(), "Cannot remove file.");
           }
         }
       }
@@ -530,19 +530,19 @@
       badFiles = namenode.getNamesystem().
         listCorruptFileBlocks("/srcdat2", null); 
       LOG.info("Namenode has bad files. " + badFiles.size());
-      assertEquals("Namenode has " + badFiles.size() + " bad files. " +
-          "Expecting " + maxCorruptFileBlocks + ".", maxCorruptFileBlocks,
-          badFiles.size());
+      assertEquals(maxCorruptFileBlocks, badFiles.size(),
+          "Namenode has " + badFiles.size() + " bad files. " +
+          "Expecting " + maxCorruptFileBlocks + ".");
 
       CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
         fs.listCorruptFileBlocks(new Path("/srcdat2"));
       int corruptPaths = countPaths(iter);
-      assertTrue("Expected more than " + maxCorruptFileBlocks +
-                 " corrupt file blocks but got " + corruptPaths,
-                 corruptPaths > maxCorruptFileBlocks);
-      assertTrue("Iterator should have made more than 1 call but made " +
-                 iter.getCallsMade(),
-                 iter.getCallsMade() > 1);
+      assertTrue(corruptPaths > maxCorruptFileBlocks,
+          "Expected more than " + maxCorruptFileBlocks +
+          " corrupt file blocks but got " + corruptPaths);
+      assertTrue(iter.getCallsMade() > 1,
+          "Iterator should have made more than 1 call but made " +
+          iter.getCallsMade());
 
       util.cleanup(fs, "/srcdat2");
     } finally {
@@ -591,9 +591,9 @@
         for (File metadataFile : metadataFiles) {
           File blockFile = Block.metaToBlockFile(metadataFile);
           LOG.info("Deliberately removing file " + blockFile.getName());
-          assertTrue("Cannot remove file.", blockFile.delete());
+          assertTrue(blockFile.delete(), "Cannot remove file.");
           LOG.info("Deliberately removing file " + metadataFile.getName());
-          assertTrue("Cannot remove file.", metadataFile.delete());
+          assertTrue(metadataFile.delete(), "Cannot remove file.");
         }
       }
 
@@ -610,7 +610,7 @@
       }
       // Validate we get all the corrupt files
       LOG.info("Namenode has bad files. " + numCorrupt);
-      assertEquals("Failed to get corrupt files!", 3, numCorrupt);
+      assertEquals(3, numCorrupt, "Failed to get corrupt files!");
 
       util.cleanup(fs, "corruptData");
     } finally {
@@ -628,7 +628,7 @@
   private void assertCorruptFilesCount(MiniDFSCluster cluster,
       int expectedCorrupt) {
     FSNamesystem fs = cluster.getNameNode().getNamesystem();
-    assertEquals("Incorrect number of corrupt files returned", expectedCorrupt,
-        fs.getCorruptFilesCount());
+    assertEquals(expectedCorrupt, fs.getCorruptFilesCount(),
+        "Incorrect number of corrupt files returned");
   }
 }
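Note on the conversion pattern above: JUnit 5's Assertions take the failure message as the last parameter rather than the first, so `assertEquals("msg", expected, actual)` becomes `assertEquals(expected, actual, "msg")`. A minimal, self-contained sketch of that pattern (class name and values are illustrative only, not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;
    import static org.junit.jupiter.api.Assertions.fail;

    import org.junit.jupiter.api.Test;

    public class AssertionMessageOrderExample {
      @Test
      public void messageIsLastArgument() {
        int badFiles = 0;
        boolean dataDirExists = true;
        // JUnit 4: assertEquals("Expecting none.", 0, badFiles);
        assertEquals(0, badFiles, "Expecting none.");
        // JUnit 4: assertTrue("data directory does not exist", dataDirExists);
        assertTrue(dataDirExists, "data directory does not exist");
        // assertTrue(false, msg), as used in the catch blocks above, is
        // equivalent to fail(msg) once fail is statically imported.
        if (badFiles > 0) {
          fail("unexpected corrupt files: " + badFiles);
        }
      }
    }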
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
index c60a136..0f5464f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -55,10 +53,10 @@
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ChunkedArrayList;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
-import org.junit.Assert;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Verify open files listing.
@@ -72,7 +70,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestListOpenFiles.class);
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
@@ -85,7 +83,7 @@
     nnRpc = cluster.getNameNodeRpc();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -104,13 +102,13 @@
     BatchedEntries<OpenFileEntry> openFileEntryBatchedEntries =
         nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
             OpenFilesIterator.FILTER_PATH_DEFAULT);
-    assertTrue("Open files list should be empty!",
-        openFileEntryBatchedEntries.size() == 0);
+    assertTrue(openFileEntryBatchedEntries.size() == 0,
+        "Open files list should be empty!");
     BatchedEntries<OpenFileEntry> openFilesBlockingDecomEntries =
         nnRpc.listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
             OpenFilesIterator.FILTER_PATH_DEFAULT);
-    assertTrue("Open files list blocking decommission should be empty!",
-        openFilesBlockingDecomEntries.size() == 0);
+    assertTrue(openFilesBlockingDecomEntries.size() == 0,
+        "Open files list blocking decommission should be empty!");
 
     openFiles.putAll(
         DFSTestUtil.createOpenFiles(fs, "open-1", 1));
@@ -146,18 +144,18 @@
         batchedEntries = nnRpc.listOpenFiles(lastEntry.getId(),
             openFilesTypes, path);
       }
-      assertTrue("Incorrect open files list size!",
-          batchedEntries.size() <= BATCH_SIZE);
+      assertTrue(batchedEntries.size() <= BATCH_SIZE,
+          "Incorrect open files list size!");
       for (int i = 0; i < batchedEntries.size(); i++) {
         lastEntry = batchedEntries.get(i);
         String filePath = lastEntry.getFilePath();
         LOG.info("OpenFile: " + filePath);
-        assertTrue("Unexpected open file: " + filePath,
-            remainingFiles.remove(new Path(filePath)));
+        assertTrue(remainingFiles.remove(new Path(filePath)),
+            "Unexpected open file: " + filePath);
       }
     } while (batchedEntries.hasMore());
-    assertTrue(remainingFiles.size() + " open files not listed!",
-        remainingFiles.size() == 0);
+    assertTrue(remainingFiles.size() == 0,
+        remainingFiles.size() + " open files not listed!");
   }
 
   /**
@@ -258,7 +256,7 @@
           new String[] {"-listOpenFiles"}));
       assertEquals(0, ToolRunner.run(dfsAdmin,
           new String[] {"-listOpenFiles", "-blockingDecommission"}));
-      assertFalse("Client Error!", listOpenFilesError.get());
+      assertFalse(listOpenFilesError.get(), "Client Error!");
 
       clientThread.join();
     } finally {
@@ -277,13 +275,13 @@
     BatchedEntries<OpenFileEntry> openFileEntryBatchedEntries = nnRpc
         .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
             OpenFilesIterator.FILTER_PATH_DEFAULT);
-    assertTrue("Open files list should be empty!",
-        openFileEntryBatchedEntries.size() == 0);
+    assertTrue(openFileEntryBatchedEntries.size() == 0,
+        "Open files list should be empty!");
     BatchedEntries<OpenFileEntry> openFilesBlockingDecomEntries = nnRpc
         .listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
             OpenFilesIterator.FILTER_PATH_DEFAULT);
-    assertTrue("Open files list blocking decommission should be empty!",
-        openFilesBlockingDecomEntries.size() == 0);
+    assertTrue(openFilesBlockingDecomEntries.size() == 0,
+        "Open files list blocking decommission should be empty!");
 
     openFiles.putAll(
         DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1));
@@ -347,7 +345,7 @@
       assertEquals(0, openFileEntryBatchedEntries.size());
       fsNamesystem.leaseManager.removeLease(dir.getINode(path).getId());
     } catch (NullPointerException e) {
-      Assert.fail("Should not throw NPE when the file is deleted but has lease!");
+      Assertions.fail("Should not throw NPE when the file is deleted but has lease!");
     } finally {
       fsNamesystem.writeUnlock();
     }
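The lifecycle annotations follow the same mechanical mapping in every file touched here: @Before becomes @BeforeEach and @After becomes @AfterEach. The Jupiter lifecycle callbacks only fire for test methods annotated with org.junit.jupiter.api.Test. A hedged sketch of the intended end state (resource and class names are illustrative, not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    public class LifecycleMappingExample {
      private StringBuilder resource;

      @BeforeEach   // JUnit 4: @Before
      public void setUp() {
        resource = new StringBuilder("initialised");
      }

      @AfterEach    // JUnit 4: @After
      public void tearDown() {
        resource = null;
      }

      @Test         // must be org.junit.jupiter.api.Test for the callbacks above to run
      public void resourceIsInitialised() {
        assertNotNull(resource, "setUp() should have run");
      }
    }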
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
index 0c71445..cf46ed7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
@@ -22,17 +22,17 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestMalformedURLs {
   private MiniDFSCluster cluster;
   Configuration config;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration.addDefaultResource("hdfs-site.malformed.xml");
     config = new Configuration();
@@ -50,7 +50,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index c88570b..99b35f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;
@@ -43,10 +41,10 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the creation and validation of metasave
@@ -59,7 +57,7 @@
   private static FileSystem fileSys = null;
   private static NamenodeProtocols nnRpc = null;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     // start a cluster
     Configuration conf = new HdfsConfiguration();
@@ -104,7 +102,7 @@
     try {
       reader = new BufferedReader(new InputStreamReader(in));
       String line = reader.readLine();
-      Assert.assertEquals(
+      Assertions.assertEquals(
           "3 files and directories, 2 blocks = 5 total filesystem objects",
           line);
       line = reader.readLine();
@@ -275,7 +273,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (fileSys != null)
       fileSys.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
index e1663e2..a39ab5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -26,8 +26,8 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.ExitUtil;
 
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -43,7 +43,7 @@
   private MiniDFSCluster dfsCluster = null;
   private final Configuration conf = new Configuration();
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (dfsCluster != null) {
       dfsCluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index 1809942..ee1f841 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -21,7 +21,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,9 +34,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.junit.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
+import org.junit.jupiter.api.Test;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
index c0f0970..c37ca92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
@@ -45,9 +45,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
@@ -62,7 +62,7 @@
    * For the purpose of this test, purge as many edits as we can 
    * with no extra "safety cushion"
    */
-  @Before
+  @BeforeEach
   public void setNoExtraEditRetention() {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
   }
@@ -310,27 +310,27 @@
     for (FSImageFile captured : imagesPurgedCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
     }
-    Assert.assertEquals("Image file check.",
-      Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
-      Joiner.on(",").join(capturedPaths));
+    Assertions.assertEquals(
+      Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
+      Joiner.on(",").join(capturedPaths), "Image file check.");
 
     capturedPaths.clear();
     // Check edit logs, and also in progress edits older than minTxIdToKeep
     for (EditLogFile captured : logsPurgedCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
     }
-    Assert.assertEquals("Check old edits are removed.",
-      Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
-      Joiner.on(",").join(capturedPaths));
+    Assertions.assertEquals(
+      Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
+      Joiner.on(",").join(capturedPaths), "Check old edits are removed.");
 
     capturedPaths.clear();
     // Check in progress edits to keep are marked as stale
     for (EditLogFile captured : staleLogsCaptor.getAllValues()) {
       capturedPaths.add(fileToPath(captured.getFile()));
     }
-    Assert.assertEquals("Check unnecessary but kept edits are marked stale",
-      Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)),
-      Joiner.on(",").join(capturedPaths));
+    Assertions.assertEquals(
+      Joiner.on(",").join(filesToPaths(tc.expectedStaleLogs)),
+      Joiner.on(",").join(capturedPaths), "Check unnecessary but kept edits are marked stale");
   }
 
   private class TestCaseDescription {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index 44bf5b7..00bbafc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -29,19 +29,19 @@
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.ExitUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestNNThroughputBenchmark {
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() {
     ExitUtil.disableSystemExit();
   }
 
-  @After
+  @AfterEach
   public void cleanUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
   }
@@ -153,10 +153,10 @@
       listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
       HdfsFileStatus[] partialListingAfter = listing.getPartialListing();
 
-      Assert.assertEquals(partialListing.length, partialListingAfter.length);
+      Assertions.assertEquals(partialListing.length, partialListingAfter.length);
       for (int i = 0; i < partialListing.length; i++) {
         //Check the modification time after append operation
-        Assert.assertNotEquals(partialListing[i].getModificationTime(),
+        Assertions.assertNotEquals(partialListing[i].getModificationTime(),
             partialListingAfter[i].getModificationTime());
       }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
index e32e77e..b61d6d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
@@ -17,12 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import org.junit.jupiter.api.Test;
 
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test for {@link NameCache} class
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 7071c66..59239bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -38,9 +35,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 
@@ -63,7 +59,7 @@
   private final File base_dir = new File(
       PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     if(base_dir.exists() && !FileUtil.fullyDelete(base_dir)) {
       throw new IOException("Cannot remove directory " + base_dir);
@@ -77,17 +73,17 @@
     FSImageTransactionalStorageInspector ins = inspect(dir);
 
     if (shouldHaveImages) {
-      assertTrue("Expect images in " + dir, ins.foundImages.size() > 0);
+      assertTrue(ins.foundImages.size() > 0, "Expect images in " + dir);
     } else {
-      assertTrue("Expect no images in " + dir, ins.foundImages.isEmpty());      
+      assertTrue(ins.foundImages.isEmpty(), "Expect no images in " + dir);
     }
 
     List<FileJournalManager.EditLogFile> editlogs 
       = FileJournalManager.matchEditLogs(new File(dir, "current").listFiles()); 
     if (shouldHaveEdits) {
-      assertTrue("Expect edits in " + dir, editlogs.size() > 0);
+      assertTrue(editlogs.size() > 0, "Expect edits in " + dir);
     } else {
-      assertTrue("Expect no edits in " + dir, editlogs.isEmpty());
+      assertTrue(editlogs.isEmpty(), "Expect no edits in " + dir);
     }
   }
 
@@ -95,9 +91,9 @@
       throws IOException {
     assertTrue(fileSys.exists(name));
     int replication = fileSys.getFileStatus(name).getReplication();
-    assertEquals("replication for " + name, repl, replication);
+    assertEquals(repl, replication, "replication for " + name);
     long size = fileSys.getContentSummary(name).getLength();
-    assertEquals("file size for " + name, size, FILE_SIZE);
+    assertEquals(size, FILE_SIZE, "file size for " + name);
   }
 
   private void cleanupFile(FileSystem fileSys, Path name)
@@ -602,14 +598,14 @@
       cluster.waitActive();
       secondary = startSecondaryNameNode(conf);
       secondary.doCheckpoint();
-      assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
-          checkpointNameDir1.exists());
-      assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
-          checkpointNameDir2.exists());
-      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
-          + " must be trimmed ", checkpointEditsDir1.exists());
-      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
-          + " must be trimmed ", checkpointEditsDir2.exists());
+      assertTrue(checkpointNameDir1.exists(),
+          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ");
+      assertTrue(checkpointNameDir2.exists(),
+          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ");
+      assertTrue(checkpointEditsDir1.exists(),
+          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY + " must be trimmed ");
+      assertTrue(checkpointEditsDir2.exists(),
+          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY + " must be trimmed ");
     } finally {
       secondary.shutdown();
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java
index 6a36c98..fcf40c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Tests NameNode interaction for all ACL modification APIs.  This test suite
@@ -26,7 +26,7 @@
  */
 public class TestNameNodeAcl extends FSAclBaseTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new Configuration();
     startCluster();
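For class-level setup such as the init() above, @BeforeClass maps to @BeforeAll (and @AfterClass to @AfterAll); as in JUnit 4, the annotated methods must stay static unless the test class opts into a per-class test-instance lifecycle. A small illustrative sketch, not part of this patch (names and values are hypothetical):

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    public class ClassLifecycleExample {
      private static String clusterState;

      @BeforeAll    // JUnit 4: @BeforeClass; must remain static
      public static void init() {
        clusterState = "started";
      }

      @AfterAll     // JUnit 4: @AfterClass; must remain static
      public static void shutdown() {
        clusterState = "stopped";
      }

      @Test
      public void clusterIsStarted() {
        Assertions.assertEquals("started", clusterState);
      }
    }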
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeConfiguration.java
index c42c3d9..9d98dff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeConfiguration.java
@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.HashSet;
 import java.util.Set;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestNameNodeConfiguration {
 
@@ -33,8 +33,8 @@
   public void testNameNodeSpecificKeys() {
     Set<String> keySet = new HashSet<>();
     for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
-      assertTrue("Duplicate key: " + key
-          + " in NameNode.NAMENODE_SPECIFIC_KEYS.", keySet.add(key));
+      assertTrue(keySet.add(key), "Duplicate key: " + key
+          + " in NameNode.NAMENODE_SPECIFIC_KEYS.");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
index 27efea6..0b837a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
@@ -33,10 +33,10 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -64,7 +64,7 @@
     this.policy = policy;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
@@ -81,7 +81,7 @@
         KeyStoreTestUtil.getServerSSLConfigFileName());
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     FileUtil.fullyDelete(new File(BASEDIR));
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
@@ -98,14 +98,14 @@
       server = new NameNodeHttpServer(conf, null, addr);
       server.start();
 
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
+      Assertions.assertTrue(implies(policy.isHttpEnabled(),
           canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(implies(!policy.isHttpEnabled(),
+      Assertions.assertTrue(implies(!policy.isHttpEnabled(),
           server.getHttpAddress() == null));
 
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
+      Assertions.assertTrue(implies(policy.isHttpsEnabled(),
           canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(implies(!policy.isHttpsEnabled(),
+      Assertions.assertTrue(implies(!policy.isHttpsEnabled(),
           server.getHttpsAddress() == null));
 
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index aaa713e..a99d7aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -23,9 +23,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 import java.io.IOException;
@@ -48,7 +48,7 @@
 
   public static URL getServerURL(HttpServer2 server)
       throws MalformedURLException {
-    Assert.assertNotNull("No server", server);
+    Assertions.assertNotNull(server, "No server");
     return new URL("http://"
         + NetUtils.getHostPortString(server.getConnectorAddress(0)));
   }
@@ -57,9 +57,9 @@
   public void testNameNodeXFrameOptionsEnabled() throws Exception {
     HttpURLConnection conn = createServerwithXFrame(true, null);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-    Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
-        xfoHeader != null);
-    Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+    Assertions.assertTrue(xfoHeader != null,
+        "X-FRAME-OPTIONS is absent in the header");
+    Assertions.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
         .SAMEORIGIN.toString()));
   }
 
@@ -67,7 +67,7 @@
   public void testNameNodeXFrameOptionsDisabled() throws Exception {
     HttpURLConnection conn = createServerwithXFrame(false, null);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-    Assert.assertTrue("unexpected X-FRAME-OPTION in header", xfoHeader == null);
+    Assertions.assertTrue(xfoHeader == null, "unexpected X-FRAME-OPTION in header");
   }
 
   @Test
@@ -111,9 +111,9 @@
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.connect();
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-    Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
-        xfoHeader != null);
-    Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+    Assertions.assertTrue(xfoHeader != null,
+        "X-FRAME-OPTIONS is absent in the header");
+    Assertions.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
         .SAMEORIGIN.toString()));
   }
 }
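Where the original asserted nullness via assertTrue(x != null, ...) or assertTrue(x == null, ...), as in the X-FRAME-OPTIONS checks above, JUnit 5's dedicated assertNotNull/assertNull read more directly and give clearer failure output. A possible tightening, not applied in this patch, shown with illustrative header values:

    import static org.junit.jupiter.api.Assertions.assertNotNull;
    import static org.junit.jupiter.api.Assertions.assertNull;

    import org.junit.jupiter.api.Test;

    public class NullAssertionExample {
      @Test
      public void headerPresenceChecks() {
        String enabledHeader = "SAMEORIGIN";   // illustrative value
        String disabledHeader = null;          // illustrative value
        assertNotNull(enabledHeader, "X-FRAME-OPTIONS is absent in the header");
        assertNull(disabledHeader, "unexpected X-FRAME-OPTION in header");
      }
    }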
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 81c9cb8..f8b3ea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -56,8 +56,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -77,12 +77,7 @@
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.util.Shell.getMemlockLimit;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Class for testing {@link NameNodeMXBean} implementation
@@ -223,31 +218,31 @@
       // get attribute NodeUsage
       String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
           "NodeUsage"));
-      assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
+      assertEquals(fsn.getNodeUsage(), nodeUsage, "Bad value for NodeUsage");
       // get attribute NameJournalStatus
       String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
           "NameJournalStatus"));
-      assertEquals("Bad value for NameJournalStatus",
-          fsn.getNameJournalStatus(), nameJournalStatus);
+      assertEquals(fsn.getNameJournalStatus(), nameJournalStatus,
+          "Bad value for NameJournalStatus");
       // get attribute JournalTransactionInfo
       String journalTxnInfo = (String) mbs.getAttribute(mxbeanName,
           "JournalTransactionInfo");
-      assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(),
-          journalTxnInfo);
+      assertEquals(fsn.getJournalTransactionInfo(), journalTxnInfo,
+          "Bad value for NameTxnIds");
       // get attribute "CompileInfo"
       String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
-      assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(),
-          compileInfo);
+      assertEquals(fsn.getCompileInfo(), compileInfo,
+          "Bad value for CompileInfo");
       // get attribute CorruptFiles
       String corruptFiles = (String) (mbs.getAttribute(mxbeanName,
           "CorruptFiles"));
-      assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(),
-          corruptFiles);
+      assertEquals(fsn.getCorruptFiles(), corruptFiles,
+          "Bad value for CorruptFiles");
       // get attribute CorruptFilesCount
       int corruptFilesCount = (int) (mbs.getAttribute(mxbeanName,
           "CorruptFilesCount"));
-      assertEquals("Bad value for CorruptFilesCount",
-          fsn.getCorruptFilesCount(), corruptFilesCount);
+      assertEquals(fsn.getCorruptFilesCount(), corruptFilesCount,
+          "Bad value for CorruptFilesCount");
       // get attribute NameDirStatuses
       String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
           "NameDirStatuses"));
@@ -289,8 +284,8 @@
       assertEquals(maxLockedMemory *
           cluster.getDataNodes().size(),
               mbs.getAttribute(mxbeanName, "CacheCapacity"));
-      assertNull("RollingUpgradeInfo should be null when there is no rolling"
-          + " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
+      assertNull(mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"),
+          "RollingUpgradeInfo should be null when there is no rolling upgrade");
     } finally {
       if (cluster != null) {
         for (URI dir : cluster.getNameDirs(0)) {
@@ -675,15 +670,15 @@
           (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
       ObjectMapper mapper = new ObjectMapper();
       Map<String, Object> map = mapper.readValue(topUsers, Map.class);
-      assertTrue("Could not find map key timestamp", 
-          map.containsKey("timestamp"));
-      assertTrue("Could not find map key windows", map.containsKey("windows"));
+      assertTrue(map.containsKey("timestamp"),
+          "Could not find map key timestamp");
+      assertTrue(map.containsKey("windows"), "Could not find map key windows");
       List<Map<String, List<Map<String, Object>>>> windows =
           (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
-      assertEquals("Unexpected num windows", 3, windows.size());
+      assertEquals(3, windows.size(), "Unexpected num windows");
       for (Map<String, List<Map<String, Object>>> window : windows) {
         final List<Map<String, Object>> ops = window.get("ops");
-        assertEquals("Unexpected num ops", 4, ops.size());
+        assertEquals(4, ops.size(), "Unexpected num ops");
         for (Map<String, Object> op: ops) {
           if (op.get("opType").equals("datanodeReport")) {
             continue;
@@ -698,7 +693,7 @@
           } else {
             expected = NUM_OPS;
           }
-          assertEquals("Unexpected total count", expected, count);
+          assertEquals(expected, count, "Unexpected total count");
         }
       }
     } finally {
@@ -729,7 +724,7 @@
       }
       String topUsers =
           (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
-      assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
+      assertNull(topUsers, "Did not expect to find TopUserOpCounts bean!");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -758,7 +753,7 @@
       }
       String topUsers =
           (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
-      assertNotNull("Expected TopUserOpCounts bean!", topUsers);
+      assertNotNull(topUsers, "Expected TopUserOpCounts bean!");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -870,21 +865,21 @@
       final String defaultPolicyName = defaultPolicy.getName();
       final String rs104PolicyName = "RS-10-4-1024k";
 
-      assertEquals("Enabled EC policies metric should return with " +
-          "the default EC policy", defaultPolicyName,
-          getEnabledEcPoliciesMetric());
+      assertEquals(defaultPolicyName, getEnabledEcPoliciesMetric(),
+          "Enabled EC policies metric should return with " +
+          "the default EC policy");
 
       fs.enableErasureCodingPolicy(rs104PolicyName);
-      assertEquals("Enabled EC policies metric should return with " +
-              "both enabled policies separated by a comma",
-          rs104PolicyName + ", " + defaultPolicyName,
-          getEnabledEcPoliciesMetric());
+      assertEquals(rs104PolicyName + ", " + defaultPolicyName,
+          getEnabledEcPoliciesMetric(),
+          "Enabled EC policies metric should return with " +
+          "both enabled policies separated by a comma");
 
       fs.disableErasureCodingPolicy(defaultPolicyName);
       fs.disableErasureCodingPolicy(rs104PolicyName);
-      assertEquals("Enabled EC policies metric should return with " +
-          "an empty string if there is no enabled policy",
-          "", getEnabledEcPoliciesMetric());
+      assertEquals("", getEnabledEcPoliciesMetric(),
+          "Enabled EC policies metric should return with " +
+          "an empty string if there is no enabled policy");
     } finally {
       fs.close();
       cluster.shutdown();
@@ -968,7 +963,7 @@
               return true;
             }
           } catch (Exception e) {
-            Assert.fail("Caught unexpected exception.");
+            Assertions.fail("Caught unexpected exception.");
           }
           return false;
         }
@@ -985,13 +980,13 @@
       Long ecMissingBlocks =
           (Long) mbs.getAttribute(ecBlkGrpStateMBeanName,
               "MissingECBlockGroups");
-      assertEquals("Unexpected total missing blocks!",
-          expectedMissingBlockCount, totalMissingBlocks);
-      assertEquals("Unexpected total missing blocks!",
-          totalMissingBlocks,
-          (replicaMissingBlocks + ecMissingBlocks));
-      assertEquals("Unexpected total ec missing blocks!",
-          expectedMissingBlockCount, ecMissingBlocks.longValue());
+      assertEquals(expectedMissingBlockCount, totalMissingBlocks,
+          "Unexpected total missing blocks!");
+      assertEquals(totalMissingBlocks,
+          (replicaMissingBlocks + ecMissingBlocks),
+          "Unexpected total missing blocks!");
+      assertEquals(expectedMissingBlockCount, ecMissingBlocks.longValue(),
+          "Unexpected total ec missing blocks!");
 
       // Verification of corrupt blocks
       long totalCorruptBlocks =
@@ -1002,13 +997,13 @@
       Long ecCorruptBlocks =
           (Long) mbs.getAttribute(ecBlkGrpStateMBeanName,
               "CorruptECBlockGroups");
-      assertEquals("Unexpected total corrupt blocks!",
-          expectedCorruptBlockCount, totalCorruptBlocks);
-      assertEquals("Unexpected total corrupt blocks!",
-          totalCorruptBlocks,
-          (replicaCorruptBlocks + ecCorruptBlocks));
-      assertEquals("Unexpected total ec corrupt blocks!",
-          expectedCorruptBlockCount, ecCorruptBlocks.longValue());
+      assertEquals(expectedCorruptBlockCount, totalCorruptBlocks,
+          "Unexpected total corrupt blocks!");
+      assertEquals(totalCorruptBlocks,
+          (replicaCorruptBlocks + ecCorruptBlocks),
+          "Unexpected total corrupt blocks!");
+      assertEquals(expectedCorruptBlockCount, ecCorruptBlocks.longValue(),
+          "Unexpected total ec corrupt blocks!");
 
       String corruptFiles = (String) (mbs.getAttribute(namenodeMXBeanName,
           "CorruptFiles"));
@@ -1114,8 +1109,8 @@
       throws Exception {
     long expectedTotalBlocks = expectedTotalReplicatedBlocks
         + expectedTotalECBlockGroups;
-    assertEquals("Unexpected total blocks!", expectedTotalBlocks,
-        actualTotalBlocks);
+    assertEquals(expectedTotalBlocks, actualTotalBlocks,
+        "Unexpected total blocks!");
 
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     ObjectName replStateMBeanName = new ObjectName(
@@ -1126,10 +1121,10 @@
         "TotalReplicatedBlocks");
     Long totalECBlockGroups = (Long) mbs.getAttribute(ecBlkGrpStateMBeanName,
         "TotalECBlockGroups");
-    assertEquals("Unexpected total replicated blocks!",
-        expectedTotalReplicatedBlocks, totalReplicaBlocks.longValue());
-    assertEquals("Unexpected total ec block groups!",
-        expectedTotalECBlockGroups, totalECBlockGroups.longValue());
+    assertEquals(expectedTotalReplicatedBlocks, totalReplicaBlocks.longValue(),
+        "Unexpected total replicated blocks!");
+    assertEquals(expectedTotalECBlockGroups, totalECBlockGroups.longValue(),
+        "Unexpected total ec block groups!");
     verifyEcClusterSetupVerifyResult(mbs);
   }
 
@@ -1152,8 +1147,8 @@
     Boolean isSupported = Boolean.parseBoolean(resultMap.get("isSupported"));
     String resultMessage = resultMap.get("resultMessage");
 
-    assertFalse("Test cluster does not support all enabled " +
-        "erasure coding policies.", isSupported);
+    assertFalse(isSupported, "Test cluster does not support all enabled " +
+        "erasure coding policies.");
     assertTrue(resultMessage.contains("3 racks are required for " +
         "the erasure coding policies: RS-6-3-1024k. " +
         "The number of racks is only 1."));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index 3e80091..d1c8acf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -32,15 +32,15 @@
 
 import java.util.function.Supplier;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.io.OutputStream;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestNameNodeMetadataConsistency {
   private static final Path filePath1 = new Path("/testdata1.txt");
@@ -52,7 +52,7 @@
   MiniDFSCluster cluster;
   HdfsConfiguration conf;
 
-  @Before
+  @BeforeEach
   public void InitTest() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -62,7 +62,7 @@
         .build();
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index 9b5e988..f9fed18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -32,7 +32,7 @@
 import org.apache.log4j.AsyncAppender;
 import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
@@ -42,7 +42,7 @@
 import java.util.regex.Pattern;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
index 92b96a5..e2edca5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
@@ -18,15 +18,13 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 public class TestNameNodeOptionParsing {
 
@@ -132,7 +130,7 @@
       final String[] args = {"-rollingUpgrade", "foo"};
       try {
         NameNode.parseArguments(args);
-        Assert.fail();
+        Assertions.fail();
       } catch(IllegalArgumentException iae) {
         // the exception is expected.
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index ada7c82..1d8338e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -21,11 +21,11 @@
 import java.io.IOException;
 
 import org.junit.Test;
-import org.junit.Before;
-import org.junit.After;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -61,7 +61,7 @@
   private MiniDFSCluster cluster;
   private final int customizedBlockInvalidateLimit = 500;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY,
@@ -91,22 +91,22 @@
     // revert to default
     nameNode.reconfigureProperty(HADOOP_CALLER_CONTEXT_ENABLED_KEY, null);
 
-    // verify default
-    assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", false,
-        nameSystem.getCallerContextEnabled());
-    assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value", null,
-        nameNode.getConf().get(HADOOP_CALLER_CONTEXT_ENABLED_KEY));
+    // verify default
+    assertEquals(false, nameSystem.getCallerContextEnabled(),
+        HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value");
+    assertEquals(null, nameNode.getConf().get(HADOOP_CALLER_CONTEXT_ENABLED_KEY),
+        HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value");
   }
 
   void verifyReconfigureCallerContextEnabled(final NameNode nameNode,
       final FSNamesystem nameSystem, boolean expected) {
-    assertEquals(HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value",
-        expected, nameNode.getNamesystem().getCallerContextEnabled());
-    assertEquals(
-        HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value",
-        expected,
-        nameNode.getConf().getBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY,
-            HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT));
+    assertEquals(expected,
+        nameNode.getNamesystem().getCallerContextEnabled(),
+        HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value");
+    assertEquals(expected,
+        nameNode.getConf().getBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY,
+            HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT),
+        HADOOP_CALLER_CONTEXT_ENABLED_KEY + " has wrong value");
   }
 
   /**
@@ -136,18 +136,18 @@
 
     // revert to default
     nameNode.reconfigureProperty(ipcClientRPCBackoffEnable, null);
-    assertEquals(ipcClientRPCBackoffEnable + " has wrong value", false,
-        nnrs.getClientRpcServer().isClientBackoffEnabled());
-    assertEquals(ipcClientRPCBackoffEnable + " has wrong value", null,
-        nameNode.getConf().get(ipcClientRPCBackoffEnable));
+    assertEquals(false, nnrs.getClientRpcServer().isClientBackoffEnabled(),
+        ipcClientRPCBackoffEnable + " has wrong value");
+    assertEquals(null, nameNode.getConf().get(ipcClientRPCBackoffEnable),
+        ipcClientRPCBackoffEnable + " has wrong value");
   }
 
   void verifyReconfigureIPCBackoff(final NameNode nameNode,
       final NameNodeRpcServer nnrs, String property, boolean expected) {
-    assertEquals(property + " has wrong value", expected, nnrs
-        .getClientRpcServer().isClientBackoffEnabled());
-    assertEquals(property + " has wrong value", expected, nameNode.getConf()
-        .getBoolean(property, IPC_BACKOFF_ENABLE_DEFAULT));
+    assertEquals(expected, nnrs.getClientRpcServer().isClientBackoffEnabled(),
+        property + " has wrong value");
+    assertEquals(expected, nameNode.getConf().getBoolean(property,
+        IPC_BACKOFF_ENABLE_DEFAULT), property + " has wrong value");
   }
 
   /**
@@ -178,52 +178,52 @@
       assertTrue(expected.getCause() instanceof NumberFormatException);
     }
 
-    // verify change
-    assertEquals(
-        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        6,
-        nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY,
-            DFS_HEARTBEAT_INTERVAL_DEFAULT));
-    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 6,
-        datanodeManager.getHeartbeatInterval());
+    // verify change
+    assertEquals(6,
+        nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY,
+            DFS_HEARTBEAT_INTERVAL_DEFAULT),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
+    assertEquals(6,
+        datanodeManager.getHeartbeatInterval(),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
 
-    assertEquals(
-        DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value",
-        10 * 60 * 1000,
-        nameNode.getConf().getInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
-            DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT));
-    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY
-        + " has wrong value", 10 * 60 * 1000,
-        datanodeManager.getHeartbeatRecheckInterval());
+    assertEquals(10 * 60 * 1000,
+        nameNode.getConf().getInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+            DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT),
+        DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value");
+    assertEquals(10 * 60 * 1000,
+        datanodeManager.getHeartbeatRecheckInterval(),
+        DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY
+            + " has wrong value");
 
     // change to a value with time unit
     nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, "1m");
 
-    assertEquals(
-        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        60,
-        nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY,
-            DFS_HEARTBEAT_INTERVAL_DEFAULT));
-    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 60,
-        datanodeManager.getHeartbeatInterval());
+    assertEquals(60,
+        nameNode.getConf().getLong(DFS_HEARTBEAT_INTERVAL_KEY,
+            DFS_HEARTBEAT_INTERVAL_DEFAULT),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
+    assertEquals(60,
+        datanodeManager.getHeartbeatInterval(),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
 
     // revert to defaults
     nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, null);
     nameNode.reconfigureProperty(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
         null);
 
-    // verify defaults
-    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", null,
-        nameNode.getConf().get(DFS_HEARTBEAT_INTERVAL_KEY));
-    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        DFS_HEARTBEAT_INTERVAL_DEFAULT, datanodeManager.getHeartbeatInterval());
+    // verify defaults
+    assertEquals(null, nameNode.getConf().get(DFS_HEARTBEAT_INTERVAL_KEY),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
+    assertEquals(DFS_HEARTBEAT_INTERVAL_DEFAULT,
+        datanodeManager.getHeartbeatInterval(), DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
 
-    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY
-        + " has wrong value", null,
-        nameNode.getConf().get(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
-    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY
-        + " has wrong value", DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT,
-        datanodeManager.getHeartbeatRecheckInterval());
+    assertEquals(null,
+        nameNode.getConf().get(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY),
+        DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value");
+    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT,
+        datanodeManager.getHeartbeatRecheckInterval(),
+        DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY + " has wrong value");
   }
 
   /**
@@ -251,17 +251,17 @@
     nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
         StoragePolicySatisfierMode.EXTERNAL.toString());
 
-    // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
-    assertNull("SPS shouldn't start as "
-        + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled",
-            nameNode.getNamesystem().getBlockManager().getSPSManager());
+    // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
+    assertNull(nameNode.getNamesystem().getBlockManager().getSPSManager(),
+        "SPS shouldn't start as "
+            + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled");
     verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
         StoragePolicySatisfierMode.EXTERNAL, false);
 
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-        StoragePolicySatisfierMode.EXTERNAL.toString(), nameNode.getConf()
-            .get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-            DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT));
+    assertEquals(StoragePolicySatisfierMode.EXTERNAL.toString(),
+        nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+            DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT),
+        DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value");
   }
 
   /**
@@ -295,13 +295,13 @@
     // enable external SPS
     nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
         StoragePolicySatisfierMode.EXTERNAL.toString());
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-        false, nameNode.getNamesystem().getBlockManager().getSPSManager()
-            .isSatisfierRunning());
-    assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-        StoragePolicySatisfierMode.EXTERNAL.toString(),
-        nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
-            DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT));
+    assertEquals(false, nameNode.getNamesystem().getBlockManager()
+        .getSPSManager().isSatisfierRunning(),
+        DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value");
+    assertEquals(StoragePolicySatisfierMode.EXTERNAL.toString(),
+        nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+            DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT),
+        DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value");
   }
 
   /**
@@ -340,11 +340,11 @@
             .getNamesystem().getBlockManager().getSPSManager();
     boolean isSPSRunning = spsMgr != null ? spsMgr.isSatisfierRunning()
         : false;
-    assertEquals(property + " has wrong value", isSPSRunning, isSPSRunning);
+    assertEquals(isSPSRunning, isSPSRunning, property + " has wrong value");
     String actual = nameNode.getConf().get(property,
         DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
-    assertEquals(property + " has wrong value", expected,
-        StoragePolicySatisfierMode.fromString(actual));
+    assertEquals(expected, StoragePolicySatisfierMode.fromString(actual),
+        property + " has wrong value");
   }
 
   @Test
@@ -354,29 +354,29 @@
     final DatanodeManager datanodeManager = nameNode.namesystem
         .getBlockManager().getDatanodeManager();
 
-    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
-        customizedBlockInvalidateLimit,
-        datanodeManager.getBlockInvalidateLimit());
+    assertEquals(customizedBlockInvalidateLimit,
+        datanodeManager.getBlockInvalidateLimit(),
+        DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set");
 
     nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
         Integer.toString(6));
 
-    // 20 * 6 = 120 < 500
-    // Invalid block limit should stay same as before after reconfiguration.
-    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
-            + " is not honored after reconfiguration",
-        customizedBlockInvalidateLimit,
-        datanodeManager.getBlockInvalidateLimit());
+    // 20 * 6 = 120 < 500
+    // Invalid block limit should stay same as before after reconfiguration.
+    assertEquals(customizedBlockInvalidateLimit,
+        datanodeManager.getBlockInvalidateLimit(),
+        DFS_BLOCK_INVALIDATE_LIMIT_KEY
+            + " is not honored after reconfiguration");
 
     nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
         Integer.toString(50));
 
-    // 20 * 50 = 1000 > 500
-    // Invalid block limit should be reset to 1000
-    assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
-            + " is not reconfigured correctly",
-        1000,
-        datanodeManager.getBlockInvalidateLimit());
+    // 20 * 50 = 1000 > 500
+    // Invalid block limit should be reset to 1000
+    assertEquals(1000,
+        datanodeManager.getBlockInvalidateLimit(),
+        DFS_BLOCK_INVALIDATE_LIMIT_KEY
+            + " is not reconfigured correctly");
   }
 
   @Test
@@ -394,7 +394,7 @@
     assertEquals(true, FSImageFormatProtobuf.getEnableParallelLoad());
   }
 
-  @After
+  @AfterEach
   public void shutDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
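The lifecycle annotations above follow the same migration: org.junit.Before and org.junit.After become org.junit.jupiter.api.BeforeEach and AfterEach (class-level fixtures would likewise move from @BeforeClass/@AfterClass to @BeforeAll/@AfterAll). A minimal sketch under those assumptions, with a made-up resource standing in for the MiniDFSCluster the real tests manage:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleAnnotationSketch {
  private StringBuilder resource;  // hypothetical stand-in for a cluster handle

  @BeforeEach   // JUnit 4: @Before
  void setUp() {
    resource = new StringBuilder("ready");
  }

  @AfterEach    // JUnit 4: @After
  void shutDown() {
    resource = null;
  }

  @Test
  void resourceIsInitialized() {
    assertEquals("ready", resource.toString());
  }
}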
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 51389c8..ed7c9da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.spy;
 
@@ -584,7 +582,7 @@
     }
 
     File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
-    assertTrue("Should exist: " + editFile, editFile.exists());
+    assertTrue(editFile.exists(), "Should exist: " + editFile);
 
     // Corrupt the edit log
     LOG.info("corrupting edit log file '" + editFile + "'");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
index f86ce5f..7f7965a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -35,8 +33,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 public class TestNameNodeResourceChecker {
@@ -45,7 +43,7 @@
   private File baseDir;
   private File nameDir;
 
-  @Before
+  @BeforeEach
   public void setUp () throws IOException {
     conf = new Configuration();
     nameDir = new File(BASE_DIR, "resource-check-name-dir");
@@ -62,10 +60,10 @@
       throws IOException {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);
     NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
-    assertTrue(
-        "isResourceAvailable must return true if " +
-            "disk usage is lower than threshold",
-        nb.hasAvailableDiskSpace());
+    assertTrue(
+        nb.hasAvailableDiskSpace(),
+        "isResourceAvailable must return true if " +
+            "disk usage is lower than threshold");
   }
 
   /**
@@ -76,10 +74,10 @@
   public void testCheckAvailabilityNeg() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
     NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
-    assertFalse(
-        "isResourceAvailable must return false if " +
-            "disk usage is higher than threshold",
-        nb.hasAvailableDiskSpace());
+    assertFalse(
+        nb.hasAvailableDiskSpace(),
+        "isResourceAvailable must return false if " +
+            "disk usage is higher than threshold");
   }
 
   /**
@@ -114,10 +112,10 @@
           break;
         }
       }
-      assertTrue("NN resource monitor should be running",
-          isNameNodeMonitorRunning);
-      assertFalse("NN should not presently be in safe mode",
-          cluster.getNameNode().isInSafeMode());
+      assertTrue(isNameNodeMonitorRunning,
+          "NN resource monitor should be running");
+      assertFalse(cluster.getNameNode().isInSafeMode(),
+          "NN should not presently be in safe mode");
 
       mockResourceChecker.setResourcesAvailable(false);
 
@@ -128,8 +126,8 @@
         Thread.sleep(1000);
       }
 
-      assertTrue("NN should be in safe mode after resources crossed threshold",
-          cluster.getNameNode().isInSafeMode());
+      assertTrue(cluster.getNameNode().isInSafeMode(),
+          "NN should be in safe mode after resources crossed threshold");
     } finally {
       if (cluster != null)
         cluster.shutdown();
@@ -153,8 +151,8 @@
 
     NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
 
-    assertEquals("Should not check the same volume more than once.",
-        1, nb.getVolumesLowOnSpace().size());
+    assertEquals(1, nb.getVolumesLowOnSpace().size(),
+        "Should not check the same volume more than once.");
   }
 
   /**
@@ -172,8 +170,8 @@
 
     NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
 
-    assertEquals("Should not check the same volume more than once.",
-        1, nb.getVolumesLowOnSpace().size());
+    assertEquals(1, nb.getVolumesLowOnSpace().size(),
+        "Should not check the same volume more than once.");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
index 6e0657c..2545125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
@@ -17,16 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import org.junit.jupiter.api.Test;
+
 import java.util.ArrayList;
 import java.util.Collection;
 
-import org.junit.Test;
-
 public class TestNameNodeResourcePolicy {
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
index 594b07b..76fa085 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.hamcrest.core.IsNot.not;
 
@@ -37,7 +35,9 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -212,8 +212,8 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       String address = cluster.getNameNode().getHttpAddress().toString();
-      assertFalse("HTTP Bind address not expected to be wildcard by default.",
-                  address.startsWith(WILDCARD_ADDRESS));
+      assertFalse(address.startsWith(WILDCARD_ADDRESS),
+          "HTTP Bind address not expected to be wildcard by default.");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -232,8 +232,8 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       String address = cluster.getNameNode().getHttpAddress().toString();
-      assertTrue("HTTP Bind address " + address + " is not wildcard.",
-                 address.startsWith(WILDCARD_ADDRESS));
+      assertTrue(address.startsWith(WILDCARD_ADDRESS),
+          "HTTP Bind address " + address + " is not wildcard.");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -286,8 +286,8 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       String address = cluster.getNameNode().getHttpsAddress().toString();
-      assertFalse("HTTP Bind address not expected to be wildcard by default.",
-                  address.startsWith(WILDCARD_ADDRESS));
+      assertFalse(address.startsWith(WILDCARD_ADDRESS),
+          "HTTP Bind address not expected to be wildcard by default.");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -306,8 +306,8 @@
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       String address = cluster.getNameNode().getHttpsAddress().toString();
-      assertTrue("HTTP Bind address " + address + " is not wildcard.",
-                 address.startsWith(WILDCARD_ADDRESS));
+      assertTrue(address.startsWith(WILDCARD_ADDRESS),
+          "HTTP Bind address " + address + " is not wildcard.");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
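This file also swaps the assertThat import: JUnit 5 no longer ships org.junit.Assert.assertThat, so the Hamcrest form org.hamcrest.MatcherAssert.assertThat is used with the existing is/not matchers. A small sketch of that combination (it assumes Hamcrest on the test classpath, which these tests already rely on; the value is made up):

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;

import org.junit.jupiter.api.Test;

class HamcrestWithJupiterSketch {
  @Test
  void matchersStillWork() {
    String bindHost = "0.0.0.0";  // illustrative value only
    assertThat(bindHost, is("0.0.0.0"));
    assertThat(bindHost, is(not("127.0.0.1")));
  }
}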
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
index 8fb0f60..fa873fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
@@ -28,11 +28,11 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
 
 /**
@@ -53,7 +53,7 @@
   private RetryCacheMetrics metrics;
 
   /** Start a cluster */
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
@@ -74,7 +74,7 @@
    * Cleanup after the test
    * @throws IOException
    **/
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -98,9 +98,9 @@
   }
 
   private void checkMetrics(long hit, long cleared, long updated) {
-    assertEquals("CacheHit", hit, metrics.getCacheHit());
-    assertEquals("CacheCleared", cleared, metrics.getCacheCleared());
-    assertEquals("CacheUpdated", updated, metrics.getCacheUpdated());
+    assertEquals(hit, metrics.getCacheHit(), "CacheHit");
+    assertEquals(cleared, metrics.getCacheCleared(), "CacheCleared");
+    assertEquals(updated, metrics.getCacheUpdated(), "CacheUpdated");
   }
 
   private void trySaveNamespace() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
index ada93e8..704b154 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
@@ -25,14 +25,13 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestNameNodeRpcServer {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java
index a32e218..5b44db9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServerMethods.java
@@ -26,10 +26,10 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestNameNodeRpcServerMethods {
   private static NamenodeProtocols nnRpc;
@@ -37,7 +37,7 @@
   private static MiniDFSCluster cluster;
 
   /** Start a cluster */
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -53,7 +53,7 @@
    * @throws SafeModeException
    * @throws AccessControlException
    */
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -66,7 +66,7 @@
     String dir = "/testNamenodeRetryCache/testDelete";
     try {
       nnRpc.deleteSnapshot(dir, null);
-      Assert.fail("testdeleteSnapshot is not thrown expected exception ");
+      Assertions.fail("testdeleteSnapshot is not thrown expected exception ");
     } catch (IOException e) {
       // expected
       GenericTestUtils.assertExceptionContains(
@@ -74,7 +74,7 @@
     }
     try {
       nnRpc.deleteSnapshot(dir, "");
-      Assert.fail("testdeleteSnapshot is not thrown expected exception");
+      Assertions.fail("testdeleteSnapshot is not thrown expected exception");
     } catch (IOException e) {
       // expected
       GenericTestUtils.assertExceptionContains(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
index a2896ce..a91939b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java
@@ -26,8 +26,8 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -60,41 +60,41 @@
 
       // Get attribute "NNRole"
       String nnRole = (String)mbs.getAttribute(mxbeanName, "NNRole");
-      Assert.assertEquals(nn.getNNRole(), nnRole);
+      Assertions.assertEquals(nn.getNNRole(), nnRole);
 
       // Get attribute "State"
       String state = (String)mbs.getAttribute(mxbeanName, "State");
-      Assert.assertEquals(nn.getState(), state);
+      Assertions.assertEquals(nn.getState(), state);
 
       // Get attribute "HostAndPort"
       String hostAndPort = (String)mbs.getAttribute(mxbeanName, "HostAndPort");
-      Assert.assertEquals(nn.getHostAndPort(), hostAndPort);
+      Assertions.assertEquals(nn.getHostAndPort(), hostAndPort);
 
       // Get attribute "SecurityEnabled"
       boolean securityEnabled = (boolean)mbs.getAttribute(mxbeanName,
           "SecurityEnabled");
-      Assert.assertEquals(nn.isSecurityEnabled(), securityEnabled);
+      Assertions.assertEquals(nn.isSecurityEnabled(), securityEnabled);
 
       // Get attribute "LastHATransitionTime"
       long lastHATransitionTime = (long)mbs.getAttribute(mxbeanName,
           "LastHATransitionTime");
-      Assert.assertEquals(nn.getLastHATransitionTime(), lastHATransitionTime);
+      Assertions.assertEquals(nn.getLastHATransitionTime(), lastHATransitionTime);
 
       // Get attribute "BytesWithFutureGenerationStamps"
       long bytesWithFutureGenerationStamps = (long)mbs.getAttribute(
           mxbeanName, "BytesWithFutureGenerationStamps");
-      Assert.assertEquals(nn.getBytesWithFutureGenerationStamps(),
+      Assertions.assertEquals(nn.getBytesWithFutureGenerationStamps(),
           bytesWithFutureGenerationStamps);
 
       // Get attribute "SlowPeersReport"
       String slowPeersReport = (String)mbs.getAttribute(mxbeanName,
           "SlowPeersReport");
-      Assert.assertEquals(nn.getSlowPeersReport(), slowPeersReport);
+      Assertions.assertEquals(nn.getSlowPeersReport(), slowPeersReport);
 
       // Get attribute "SlowDisksReport"
       String slowDisksReport = (String)mbs.getAttribute(mxbeanName,
           "SlowDisksReport");
-      Assert.assertEquals(nn.getSlowDisksReport(), slowDisksReport);
+      Assertions.assertEquals(nn.getSlowDisksReport(), slowDisksReport);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -115,7 +115,7 @@
 
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      Assertions.assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
       String slowDiskPath = "test/data1/slowVolume";
       datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath, null);
@@ -137,9 +137,9 @@
 
       String slowDisksReport = (String)mbs.getAttribute(
           mxbeanName, "SlowDisksReport");
-      Assert.assertEquals(datanodeManager.getSlowDisksReport(),
+      Assertions.assertEquals(datanodeManager.getSlowDisksReport(),
           slowDisksReport);
-      Assert.assertTrue(slowDisksReport.contains(slowDiskPath));
+      Assertions.assertTrue(slowDisksReport.contains(slowDiskPath));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
index 1722e12..9c7e483 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
@@ -21,8 +21,8 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * Tests NameNode interaction for all XAttr APIs.
@@ -47,27 +47,27 @@
     fs.setXAttr(target, name2, value2);
     
     Map<String, byte[]> xattrs = fs.getXAttrs(link);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
     
     fs.setXAttr(link, name3, null);
     xattrs = fs.getXAttrs(target);
-    Assert.assertEquals(xattrs.size(), 3);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 3);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name3));
     
     fs.removeXAttr(link, name1);
     xattrs = fs.getXAttrs(target);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
-    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertArrayEquals(new byte[0], xattrs.get(name3));
     
     fs.removeXAttr(target, name3);
     xattrs = fs.getXAttrs(link);
-    Assert.assertEquals(xattrs.size(), 1);
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 1);
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
     
     fs.delete(linkParent, true);
     fs.delete(targetParent, true);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index a7155b5..0ba9675 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -45,8 +45,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index 7f6f399..b517b21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -19,9 +19,8 @@
 
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -29,6 +28,8 @@
 import java.util.Iterator;
 import java.util.Map;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.Options.Rename;
@@ -60,10 +61,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.LightWeightCache;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Tests for ensuring the namenode retry cache works correctly for
@@ -94,7 +95,7 @@
   private static final int BlockSize = 512;
   
   /** Start a cluster */
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
@@ -112,7 +113,7 @@
    * @throws UnresolvedLinkException 
    * @throws SafeModeException 
    * @throws AccessControlException */
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -173,7 +174,7 @@
     try {
       // Second non-retry call should fail with an exception
       nnRpc.concat(file1, new String[]{file2});
-      Assert.fail("testConcat - expected exception is not thrown");
+      Assertions.fail("testConcat - expected exception is not thrown");
     } catch (IOException e) {
       // Expected
     }
@@ -189,13 +190,13 @@
     newCall();
     nnRpc.mkdirs(dir, perm, true);
     newCall();
-    Assert.assertTrue(nnRpc.delete(dir, false));
-    Assert.assertTrue(nnRpc.delete(dir, false));
-    Assert.assertTrue(nnRpc.delete(dir, false));
+    Assertions.assertTrue(nnRpc.delete(dir, false));
+    Assertions.assertTrue(nnRpc.delete(dir, false));
+    Assertions.assertTrue(nnRpc.delete(dir, false));
     
     // non-retried call fails and gets false as return
     newCall();
-    Assert.assertFalse(nnRpc.delete(dir, false));
+    Assertions.assertFalse(nnRpc.delete(dir, false));
   }
   
   /**
@@ -216,7 +217,7 @@
     try {
       // Second non-retry call should fail with an exception
       nnRpc.createSymlink(target, "/a/b", perm, true);
-      Assert.fail("testCreateSymlink - expected exception is not thrown");
+      Assertions.fail("testCreateSymlink - expected exception is not thrown");
     } catch (IOException e) {
       // Expected
     }
@@ -233,10 +234,10 @@
     HdfsFileStatus status = nnRpc.create(src, perm, "holder",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
         (short) 1, BlockSize, null, null, null);
-    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
+    Assertions.assertEquals(status, nnRpc.create(src, perm, "holder",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
         (short) 1, BlockSize, null, null, null));
-    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
+    Assertions.assertEquals(status, nnRpc.create(src, perm, "holder",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
         (short) 1, BlockSize, null, null, null));
     // A non-retried call fails
@@ -245,7 +246,7 @@
       nnRpc.create(src, perm, "holder",
           new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
           true, (short) 1, BlockSize, null, null, null);
-      Assert.fail("testCreate - expected exception is not thrown");
+      Assertions.fail("testCreate - expected exception is not thrown");
     } catch (IOException e) {
       // expected
     }
@@ -265,9 +266,9 @@
     newCall();
     LastBlockWithStatus b = nnRpc.append(src, "holder",
         new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
-    Assert.assertEquals(b, nnRpc.append(src, "holder",
+    Assertions.assertEquals(b, nnRpc.append(src, "holder",
         new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
-    Assert.assertEquals(b, nnRpc.append(src, "holder",
+    Assertions.assertEquals(b, nnRpc.append(src, "holder",
         new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
     
     // non-retried call fails
@@ -275,7 +276,7 @@
     try {
       nnRpc.append(src, "holder",
           new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
-      Assert.fail("testAppend - expected exception is not thrown");
+      Assertions.fail("testAppend - expected exception is not thrown");
     } catch (Exception e) {
       // Expected
     }
@@ -294,13 +295,13 @@
     
     // Retried renames succeed
     newCall();
-    Assert.assertTrue(nnRpc.rename(src, target));
-    Assert.assertTrue(nnRpc.rename(src, target));
-    Assert.assertTrue(nnRpc.rename(src, target));
+    Assertions.assertTrue(nnRpc.rename(src, target));
+    Assertions.assertTrue(nnRpc.rename(src, target));
+    Assertions.assertTrue(nnRpc.rename(src, target));
     
     // A non-retried request fails
     newCall();
-    Assert.assertFalse(nnRpc.rename(src, target));
+    Assertions.assertFalse(nnRpc.rename(src, target));
   }
   
   /**
@@ -323,7 +324,7 @@
     newCall();
     try {
       nnRpc.rename2(src, target, Rename.NONE);
-      Assert.fail("testRename 2 expected exception is not thrown");
+      Assertions.fail("testRename 2 expected exception is not thrown");
     } catch (IOException e) {
       // expected
     }
@@ -378,15 +379,15 @@
     // Test retry of create snapshot
     newCall();
     String name = nnRpc.createSnapshot(dir, "snap1");
-    Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
-    Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
-    Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
+    Assertions.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
+    Assertions.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
+    Assertions.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
     
     // Non retried calls should fail
     newCall();
     try {
       nnRpc.createSnapshot(dir, "snap1");
-      Assert.fail("testSnapshotMethods expected exception is not thrown");
+      Assertions.fail("testSnapshotMethods expected exception is not thrown");
     } catch (IOException e) {
       // exptected
     }
@@ -401,7 +402,7 @@
     newCall();
     try {
       nnRpc.renameSnapshot(dir, "snap1", "snap2");
-      Assert.fail("testSnapshotMethods expected exception is not thrown");
+      Assertions.fail("testSnapshotMethods expected exception is not thrown");
     } catch (IOException e) {
       // expected
     }
@@ -416,7 +417,7 @@
     newCall();
     try {
       nnRpc.deleteSnapshot(dir, "snap2");
-      Assert.fail("testSnapshotMethods expected exception is not thrown");
+      Assertions.fail("testSnapshotMethods expected exception is not thrown");
     } catch (IOException e) {
       // expected
     }
@@ -426,11 +427,11 @@
   public void testRetryCacheConfig() {
     // By default retry configuration should be enabled
     Configuration conf = new HdfsConfiguration();
-    Assert.assertNotNull(FSNamesystem.initRetryCache(conf));
+    Assertions.assertNotNull(FSNamesystem.initRetryCache(conf));
     
     // If retry cache is disabled, it should not be created
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false);
-    Assert.assertNull(FSNamesystem.initRetryCache(conf));
+    Assertions.assertNull(FSNamesystem.initRetryCache(conf));
   }
   
   /**
@@ -444,7 +445,7 @@
 
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
+    assertEquals(39, cacheSet.size(), "Retry cache size is wrong");
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -463,7 +464,7 @@
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
+    assertEquals(39, cacheSet.size(), "Retry cache size is wrong");
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
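The try { ... Assertions.fail(...) } catch (IOException e) blocks above are carried over unchanged from the JUnit 4 version and still work under JUnit 5. The Jupiter API also offers assertThrows for the same check; the following is only a hedged sketch of that alternative, not what this patch does, with a hypothetical failing call standing in for the NameNode RPC:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;

import org.junit.jupiter.api.Test;

class AssertThrowsSketch {

  // Hypothetical stand-in for a non-retried RPC that is expected to fail.
  private void failingCall() throws IOException {
    throw new IOException("duplicate request");
  }

  @Test
  void nonRetriedCallFails() {
    IOException e = assertThrows(IOException.class, this::failingCall);
    assertTrue(e.getMessage().contains("duplicate"));
  }
}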
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java
index 5bcb3a5..546f843 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java
@@ -30,8 +30,8 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.net.Node;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -42,9 +42,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test to ensure that the StorageType and StorageID sent from Namenode
@@ -58,7 +56,7 @@
 
   private MiniDFSCluster cluster;
 
-  @After
+  @AfterEach
   public void tearDown() {
     shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
index 4a9fa71..a136709 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNestedEncryptionZones.java
@@ -34,17 +34,15 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.File;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test the behavior of nested encryption zones.
@@ -88,7 +86,7 @@
         .getProvider());
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystemTestHelper fsHelper = new FileSystemTestHelper();
@@ -114,7 +112,7 @@
     DFSTestUtil.createKey(NESTED_EZ_KEY, cluster, conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -188,12 +186,12 @@
     final Path expectedNestedEZTrash = fs.makeQualified(
         new Path(nestedEZDir, suffixTrashPath));
 
-    assertEquals("Top ez trash should be " + expectedTopEZTrash,
-        expectedTopEZTrash, topEZTrash);
-    assertEquals("Root trash should be equal with TopEZFile trash",
-        topEZTrash, rootTrash);
-    assertEquals("Nested ez Trash should be " + expectedNestedEZTrash,
-        expectedNestedEZTrash, nestedEZTrash);
+    assertEquals(expectedTopEZTrash, topEZTrash,
+        "Top ez trash should be " + expectedTopEZTrash);
+    assertEquals(topEZTrash, rootTrash,
+        "Root trash should be equal with TopEZFile trash");
+    assertEquals(expectedNestedEZTrash, nestedEZTrash,
+        "Nested ez Trash should be " + expectedNestedEZTrash);
 
     // delete rename file and test trash
     FsShell shell = new FsShell(fs.getConf());
@@ -205,9 +203,9 @@
     ToolRunner.run(shell, new String[]{"-rm", topEZFile.toString()});
     ToolRunner.run(shell, new String[]{"-rm", nestedEZFile.toString()});
 
-    assertTrue("File not in trash : " + topTrashFile, fs.exists(topTrashFile));
-    assertTrue(
-        "File not in trash : " + nestedTrashFile, fs.exists(nestedTrashFile));
+    assertTrue(fs.exists(topTrashFile), "File not in trash : " + topTrashFile);
+    assertTrue(fs.exists(nestedTrashFile),
+        "File not in trash : " + nestedTrashFile);
   }
 
   private void renameChildrenOfEZ() throws Exception{
@@ -285,14 +283,14 @@
   }
 
   private void verifyEncryption() throws Exception {
-    assertEquals("Top EZ dir is encrypted",
-        true, fs.getFileStatus(topEZDir).isEncrypted());
-    assertEquals("Nested EZ dir is encrypted",
-        true, fs.getFileStatus(nestedEZDir).isEncrypted());
-    assertEquals("Top zone file is encrypted",
-        true, fs.getFileStatus(topEZFile).isEncrypted());
-    assertEquals("Nested zone file is encrypted",
-        true, fs.getFileStatus(nestedEZFile).isEncrypted());
+    assertEquals(true, fs.getFileStatus(topEZDir).isEncrypted(),
+        "Top EZ dir is encrypted");
+    assertEquals(true, fs.getFileStatus(nestedEZDir).isEncrypted(),
+        "Nested EZ dir is encrypted");
+    assertEquals(true, fs.getFileStatus(topEZFile).isEncrypted(),
+        "Top zone file is encrypted");
+    assertEquals(true, fs.getFileStatus(nestedEZFile).isEncrypted(),
+        "Nested zone file is encrypted");
 
     DFSTestUtil.verifyFilesEqual(fs, topEZBaseFile, topEZFile, len);
     DFSTestUtil.verifyFilesEqual(fs, nestedEZBaseFile, nestedEZFile, len);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
index 7796ed41..a6e5105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNetworkTopologyServlet.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.StaticMapping;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -34,8 +34,8 @@
 import java.util.Iterator;
 import java.util.Map;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestNetworkTopologyServlet {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
index 86ae642..4f5d451 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.util.Collections;
@@ -37,7 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * A JUnit test for checking if restarting DFS preserves integrity.
@@ -69,8 +67,8 @@
       String[] nameNodeDirs = conf.getStrings(
           DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[] {});
       numNamenodeDirs = nameNodeDirs.length;
-      assertTrue("failed to get number of Namenode StorageDirs", 
-          numNamenodeDirs != 0);
+      assertTrue(numNamenodeDirs != 0,
+          "failed to get number of Namenode StorageDirs");
       FileSystem fs = cluster.getFileSystem();
       files.createFiles(fs, dir);
 
@@ -93,8 +91,8 @@
           .numDataNodes(NUM_DATANODES).build();
       fsn = cluster.getNamesystem();
       FileSystem fs = cluster.getFileSystem();
-      assertTrue("Filesystem corrupted after restart.",
-                 files.checkFiles(fs, dir));
+      assertTrue(files.checkFiles(fs, dir),
+          "Filesystem corrupted after restart.");
 
       final FileStatus newrootstatus = fs.getFileStatus(rootpath);
       assertEquals(rootmtime, newrootstatus.getModificationTime());
@@ -114,9 +112,9 @@
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       cluster.getNameNodeRpc().saveNamespace(0, 0);
       final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
-      assertFalse("Modified namespace should change fsimage contents. " +
-          "was: " + checkAfterRestart + " now: " + checkAfterModify,
-          checkAfterRestart.equals(checkAfterModify));
+      assertFalse(checkAfterRestart.equals(checkAfterModify),
+          "Modified namespace should change fsimage contents. " +
+          "was: " + checkAfterRestart + " now: " + checkAfterModify);
       fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
       files.cleanup(fs, dir);
     } finally {
@@ -138,11 +136,11 @@
       FSNamesystem fsn, int numImageDirs)
   throws Exception {    
     NNStorage stg = fsn.getFSImage().getStorage();
-    //any failed StorageDirectory is removed from the storageDirs list
-    assertEquals("Some StorageDirectories failed Upgrade",
-        numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
-    assertTrue("Not enough fsimage copies in MiniDFSCluster " + 
-        "to test parallel write", numImageDirs > 1);
+    //any failed StorageDirectory is removed from the storageDirs list
+    assertEquals(numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE),
+        "Some StorageDirectories failed Upgrade");
+    assertTrue(numImageDirs > 1, "Not enough fsimage copies in MiniDFSCluster " +
+        "to test parallel write");
 
     // List of "current/" directory from each SD
     List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java
index 189f34c..6aa9e3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import java.util.Arrays;
 
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 5a6d12a..2907736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -43,7 +43,8 @@
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test persistence of satisfying files/directories.
@@ -449,8 +450,8 @@
       FSNamesystem namesystem = cluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode("/parent");
       XAttrFeature f = inode.getXAttrFeature();
-      assertTrue("SPS xAttr should be exist",
-          f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null);
+      assertTrue(f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null,
+          "SPS xAttr should exist");
 
       // check for the child, SPS xAttr should not be there
       for (int i = 0; i < 5; i++) {
@@ -497,8 +498,8 @@
       FSNamesystem namesystem = cluster.getNamesystem();
       INode inode = namesystem.getFSDirectory().getINode("/file");
       XAttrFeature f = inode.getXAttrFeature();
-      assertTrue("SPS xAttr should be exist",
-          f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null);
+      assertTrue(f.getXAttr(XATTR_SATISFY_STORAGE_POLICY) != null,
+          "SPS xAttr should be exist");
 
       cluster.restartDataNode(stopDataNode, false);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
index d17d800..21f145f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -288,7 +288,7 @@
       if (scanLogFile.exists()) {
         // wait for one minute for deletion to succeed;
         for (int i = 0; !scanLogFile.delete(); i++) {
-          assertTrue("Could not delete log file in one minute", i < 60);
+          assertTrue(i < 60, "Could not delete log file in one minute");
           try {
             Thread.sleep(1000);
           } catch (InterruptedException ignored) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index ea68ee7..00dea6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,10 +43,10 @@
 import java.util.*;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES;
 
 /**
@@ -238,22 +238,22 @@
     nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew);
 
     FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory();
-    // verify change
-    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
-        protectedPathsNew, fsDirectory.getProtectedDirectories());
+    // verify change
+    assertEquals(protectedPathsNew, fsDirectory.getProtectedDirectories(),
+        String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES));
 
-    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
-        protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+    assertEquals(protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES),
+        String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES));
 
     // revert to default
     nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null);
 
-    // verify default
-    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
-        new TreeSet<String>(), fsDirectory.getProtectedDirectories());
+    // verify default
+    assertEquals(new TreeSet<String>(), fsDirectory.getProtectedDirectories(),
+        String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES));
 
-    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
-        null, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+    assertEquals(null, nn.getConf().get(FS_PROTECTED_DIRECTORIES),
+        String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES));
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
index 6f69792..7c79963 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
@@ -32,19 +32,17 @@
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestQuotaByStorageType {
 
@@ -61,7 +59,7 @@
   protected static final Logger LOG =
       LoggerFactory.getLogger(TestQuotaByStorageType.class);
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -77,7 +75,7 @@
     refreshClusterState();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -207,8 +205,8 @@
 
     QuotaCounts counts = fnode.computeQuotaUsage(
         fsn.getBlockManager().getStoragePolicySuite(), true);
-    assertEquals(fnode.dumpTreeRecursively().toString(), 0,
-        counts.getTypeSpaces().get(StorageType.SSD));
+    assertEquals(0, counts.getTypeSpaces().get(StorageType.SSD),
+        fnode.dumpTreeRecursively().toString());
 
     ContentSummary cs = dfs.getContentSummary(foo);
     assertEquals(cs.getSpaceConsumed(), 0);
@@ -466,10 +464,10 @@
     // Validate the computeQuotaUsage()
     QuotaCounts counts = fnode.computeQuotaUsage(
         fsn.getBlockManager().getStoragePolicySuite(), true);
-    assertEquals(fnode.dumpTreeRecursively().toString(), 1,
-        counts.getNameSpace());
-    assertEquals(fnode.dumpTreeRecursively().toString(), 0,
-        counts.getStorageSpace());
+    assertEquals(1, counts.getNameSpace(),
+        fnode.dumpTreeRecursively().toString());
+    assertEquals(0, counts.getStorageSpace(),
+        fnode.dumpTreeRecursively().toString());
   }
 
   /**
@@ -578,8 +576,8 @@
 
     QuotaCounts counts1 = sub1Node.computeQuotaUsage(
         fsn.getBlockManager().getStoragePolicySuite(), true);
-    assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len,
-        counts1.getTypeSpaces().get(StorageType.SSD));
+    assertEquals(file1Len, counts1.getTypeSpaces().get(StorageType.SSD),
+        sub1Node.dumpTreeRecursively().toString());
 
     ContentSummary cs1 = dfs.getContentSummary(sub1);
     assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
@@ -596,8 +594,8 @@
 
     QuotaCounts counts2 = sub1Node.computeQuotaUsage(
         fsn.getBlockManager().getStoragePolicySuite(), true);
-    assertEquals(sub1Node.dumpTreeRecursively().toString(), 0,
-        counts2.getTypeSpaces().get(StorageType.SSD));
+    assertEquals(0, counts2.getTypeSpaces().get(StorageType.SSD),
+        sub1Node.dumpTreeRecursively().toString());
 
     ContentSummary cs2 = dfs.getContentSummary(sub1);
     assertEquals(cs2.getSpaceConsumed(), 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java
index e731f68..637c928 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaCounts.java
@@ -19,10 +19,10 @@
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
 
 /**
  * Test QuotaCounts.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index e89ed8d..348b861 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -31,10 +31,10 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
@@ -64,7 +64,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     blockSize = 1024 * 1024;
     ecPolicy = getEcPolicy();
@@ -92,7 +92,7 @@
     dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -124,8 +124,8 @@
       final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
           .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
       // When we add a new block we update the quota using the full block size.
-      Assert.assertEquals(blockSize * groupSize, spaceUsed);
-      Assert.assertEquals(blockSize * groupSize, diskUsed);
+      Assertions.assertEquals(blockSize * groupSize, spaceUsed);
+      Assertions.assertEquals(blockSize * groupSize, diskUsed);
 
       dfs.getClient().getNamenode().complete(file.toString(),
           dfs.getClient().getClientName(), previous, fileNode.getId());
@@ -135,9 +135,9 @@
       final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature()
           .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
       // In this case the file's real size is cell size * block group size.
-      Assert.assertEquals(cellSize * groupSize,
+      Assertions.assertEquals(cellSize * groupSize,
           actualSpaceUsed);
-      Assert.assertEquals(cellSize * groupSize,
+      Assertions.assertEquals(cellSize * groupSize,
           actualDiskUsed);
     } finally {
       IOUtils.cleanupWithLogger(null, out);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index fc307bf..bbbdfa2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -45,8 +45,8 @@
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,9 +54,7 @@
 import java.util.Iterator;
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestReconstructStripedBlocks {
   public static final Logger LOG = LoggerFactory.getLogger(
@@ -167,8 +165,8 @@
       DataNode lastDn = cluster.getDataNodes().get(groupSize);
       DatanodeDescriptor last =
           bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
-      assertEquals("Counting the number of outstanding EC tasks", numBlocks,
-          last.getNumberOfBlocksToBeErasureCoded());
+      assertEquals(numBlocks, last.getNumberOfBlocksToBeErasureCoded(),
+          "Counting the number of outstanding EC tasks");
       List<BlockECReconstructionInfo> reconstruction =
           last.getErasureCodeCommand(numBlocks);
       for (BlockECReconstructionInfo info : reconstruction) {
@@ -348,7 +346,7 @@
           Thread.sleep(1000);
         }
       }
-      Assert.assertTrue(reconstructed);
+      Assertions.assertTrue(reconstructed);
 
       blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
       block = (LocatedStripedBlock) blks.getLastLocatedBlock();
@@ -357,7 +355,7 @@
         bitSet.set(index);
       }
       for (int i = 0; i < groupSize; i++) {
-        Assert.assertTrue(bitSet.get(i));
+        Assertions.assertTrue(bitSet.get(i));
       }
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
index 1a1fc16..5796d39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -39,9 +39,9 @@
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test RedudantBlocks.
@@ -62,7 +62,7 @@
   private final int blockSize = stripesPerBlock * cellSize;
   private final int numDNs = groupSize;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -79,7 +79,7 @@
         ecPolicy.getName());
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
index 7f4be8a..9694d1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -60,18 +60,14 @@
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
+
 import org.junit.rules.Timeout;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -108,7 +104,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(180 * 1000);
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     fsHelper = new FileSystemTestHelper();
@@ -151,7 +147,7 @@
         .setKeyProvider(cluster.getNameNode().getNamesystem().getProvider());
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -414,8 +410,8 @@
 
     assertKeyVersionEquals(encFile0, fei0new);
     assertKeyVersionEquals(encFile9, fei9new);
-    assertNull("Re-encrypt queue should be empty after restart",
-        getReencryptionStatus().getNextUnprocessedZone());
+    assertNull(getReencryptionStatus().getNextUnprocessedZone(),
+        "Re-encrypt queue should be empty after restart");
   }
 
   @Test
@@ -494,18 +490,18 @@
     restartClusterDisableReencrypt();
 
     final Long zoneId = fsn.getFSDirectory().getINode(zone.toString()).getId();
-    assertEquals("Re-encrypt should restore to the last checkpoint zone",
-        zoneId, getReencryptionStatus().getNextUnprocessedZone());
-    assertEquals("Re-encrypt should restore to the last checkpoint file",
-        new Path(subdir, "4").toString(),
-        getEzManager().getZoneStatus(zone.toString()).getLastCheckpointFile());
+    assertEquals(zoneId, getReencryptionStatus().getNextUnprocessedZone(),
+        "Re-encrypt should restore to the last checkpoint zone");
+    assertEquals(new Path(subdir, "4").toString(),
+        getEzManager().getZoneStatus(zone.toString()).getLastCheckpointFile(),
+        "Re-encrypt should restore to the last checkpoint file");
 
     getEzManager().resumeReencryptForTesting();
     waitForReencryptedZones(1);
     assertKeyVersionChanged(encFile0, fei0);
     assertKeyVersionChanged(encFile9, fei9);
-    assertNull("Re-encrypt queue should be empty after restart",
-        getReencryptionStatus().getNextUnprocessedZone());
+    assertNull(getReencryptionStatus().getNextUnprocessedZone(),
+        "Re-encrypt queue should be empty after restart");
     assertEquals(11, getZoneStatus(zone.toString()).getFilesReencrypted());
   }
 
@@ -545,8 +541,8 @@
 
     assertKeyVersionEquals(encFile0, fei0new);
     assertKeyVersionEquals(encFile9, fei9new);
-    assertNull("Re-encrypt queue should be empty after restart",
-        getReencryptionStatus().getNextUnprocessedZone());
+    assertNull(getReencryptionStatus().getNextUnprocessedZone(),
+        "Re-encrypt queue should be empty after restart");
   }
 
   @Test
@@ -613,12 +609,12 @@
    */
   private void verifyZoneCompletionTime(final ZoneReencryptionStatus zs) {
     assertNotNull(zs);
-    assertTrue("Completion time should be positive. " + zs.getCompletionTime(),
-        zs.getCompletionTime() > 0);
-    assertTrue("Completion time " + zs.getCompletionTime()
-            + " should be no less than submission time "
-            + zs.getSubmissionTime(),
-        zs.getCompletionTime() >= zs.getSubmissionTime());
+    assertTrue(zs.getCompletionTime() > 0,
+        "Completion time should be positive. " + zs.getCompletionTime());
+    assertTrue(zs.getCompletionTime() >= zs.getSubmissionTime(),
+        "Completion time " + zs.getCompletionTime()
+            + " should be no less than submission time "
+            + zs.getSubmissionTime());
   }
 
   @Test
@@ -1381,15 +1377,15 @@
   private void assertKeyVersionChanged(final Path file,
       final FileEncryptionInfo original) throws Exception {
     final FileEncryptionInfo actual = getFileEncryptionInfo(file);
-    assertNotEquals("KeyVersion should be different",
-        original.getEzKeyVersionName(), actual.getEzKeyVersionName());
+    assertNotEquals(original.getEzKeyVersionName(),
+        actual.getEzKeyVersionName(), "KeyVersion should be different");
   }
 
   private void assertKeyVersionEquals(final Path file,
       final FileEncryptionInfo expected) throws Exception {
     final FileEncryptionInfo actual = getFileEncryptionInfo(file);
-    assertEquals("KeyVersion should be the same",
-        expected.getEzKeyVersionName(), actual.getEzKeyVersionName());
+    assertEquals(expected.getEzKeyVersionName(),
+        actual.getEzKeyVersionName(), "KeyVersion should be the same");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
index d4f79b5..f46f4d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java
@@ -28,9 +28,9 @@
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 import org.slf4j.LoggerFactory;
@@ -45,9 +45,9 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertTrue;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test class for ReencryptionHandler.
@@ -60,7 +60,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(180 * 1000);
 
-  @Before
+  @BeforeEach
   public void setup() {
     GenericTestUtils.setLogLevel(ReencryptionHandler.LOG, Level.TRACE);
   }
@@ -108,10 +108,10 @@
     final StopWatch sw = new StopWatch().start();
     rh.getTraverser().throttle();
     sw.stop();
-    assertTrue("should have throttled for at least 8 second",
-        sw.now(TimeUnit.MILLISECONDS) > 8000);
-    assertTrue("should have throttled for at most 12 second",
-        sw.now(TimeUnit.MILLISECONDS) < 12000);
+    assertTrue(sw.now(TimeUnit.MILLISECONDS) > 8000,
+        "should have throttled for at least 8 second");
+    assertTrue(sw.now(TimeUnit.MILLISECONDS) < 12000,
+        "should have throttled for at most 12 second");
   }
 
   @Test
@@ -139,8 +139,8 @@
     StopWatch sw = new StopWatch().start();
     rh.getTraverser().throttle();
     sw.stop();
-    assertTrue("should not have throttled",
-        sw.now(TimeUnit.MILLISECONDS) < 1000);
+    assertTrue(sw.now(TimeUnit.MILLISECONDS) < 1000,
+        "should not have throttled");
   }
 
   @Test
@@ -199,7 +199,7 @@
     rh.getTraverser().throttle();
     sw.stop();
     LOG.info("Throttle completed, consumed {}", sw.now(TimeUnit.MILLISECONDS));
-    assertTrue("should have throttled for at least 3 second",
-        sw.now(TimeUnit.MILLISECONDS) >= 3000);
+    assertTrue(sw.now(TimeUnit.MILLISECONDS) >= 3000,
+        "should have throttled for at least 3 second");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java
index 642d5e5..beedb40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java
@@ -24,16 +24,16 @@
 import org.apache.hadoop.crypto.key.kms.server.KMSWebApp;
 import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
 import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.FileWriter;
 import java.io.Writer;
 import java.util.UUID;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test class for re-encryption with minikms.
@@ -49,7 +49,7 @@
         miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     kmsDir = "target/test-classes/" + UUID.randomUUID().toString();
     final File dir = new File(kmsDir);
@@ -60,7 +60,7 @@
     super.setup();
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     super.teardown();
     if (miniKMS != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java
index b431db7..157de56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshBlockPlacementPolicy.java
@@ -29,9 +29,9 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.Node;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -41,7 +41,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test refresh block placement policy.
@@ -67,7 +67,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     config = new Configuration();
     config.setClass(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
@@ -78,7 +78,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     cluster.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java
index 8dc81f8..e3fb51f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRefreshNamenodeReplicationConfig.java
@@ -24,12 +24,13 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+
 import java.io.IOException;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * This class tests the replication related parameters in the namenode can
@@ -39,7 +40,7 @@
   private MiniDFSCluster cluster = null;
   private BlockManager bm;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     Configuration config = new Configuration();
     config.setInt(
@@ -57,7 +58,7 @@
     bm = cluster.getNameNode().getNamesystem().getBlockManager();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     cluster.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index ee91994..0e377db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
@@ -67,8 +64,8 @@
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.Whitebox;
 import org.slf4j.event.Level;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -287,12 +284,12 @@
       // since it's not traversable.
       LOG.info("Doing the first savenamespace.");
       fsn.saveNamespace(0, 0);
-      LOG.info("First savenamespace sucessful.");      
-      
-      assertTrue("Savenamespace should have marked one directory as bad." +
-                 " But found " + storage.getRemovedStorageDirs().size() +
-                 " bad directories.", 
-                   storage.getRemovedStorageDirs().size() == 1);
+      LOG.info("First savenamespace sucessful.");
+
+      assertTrue(storage.getRemovedStorageDirs().size() == 1,
+          "Savenamespace should have marked one directory as bad." +
+          " But found " + storage.getRemovedStorageDirs().size() +
+          " bad directories.");
 
       fs.setPermission(rootPath, permissionAll);
 
@@ -302,11 +299,11 @@
       LOG.info("Doing the second savenamespace.");
       fsn.saveNamespace(0, 0);
       LOG.warn("Second savenamespace sucessful.");
-      assertTrue("Savenamespace should have been successful in removing " +
-                 " bad directories from Image."  +
-                 " But found " + storage.getRemovedStorageDirs().size() +
-                 " bad directories.", 
-                 storage.getRemovedStorageDirs().size() == 0);
+      assertTrue(storage.getRemovedStorageDirs().size() == 0,
+          "Savenamespace should have been successful in removing " +
+          " bad directories from Image." +
+          " But found " + storage.getRemovedStorageDirs().size() +
+          " bad directories.");
 
       // Now shut down and restart the namesystem
       LOG.info("Shutting down fsimage.");
@@ -673,9 +670,9 @@
       file.addSnapshotFeature(null).getDiffs()
           .saveSelf2Snapshot(-1, file, null, false);
 
-      // make sure it has a diff
-      assertTrue("Snapshot fileDiff is missing.",
-          file.getFileWithSnapshotFeature().getDiffs() != null);
+      // make sure it has a diff
+      assertTrue(file.getFileWithSnapshotFeature().getDiffs() != null,
+          "Snapshot fileDiff is missing.");
 
       // saveNamespace
       fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -687,10 +684,10 @@
       dir = cluster.getNamesystem().getFSDirectory();
       file = dir.getINode(path).asFile();
 
-      // there should be no snapshot feature for the inode, when there is
-      // no snapshot.
-      assertTrue("There should be no snapshot feature for this INode.",
-          file.getFileWithSnapshotFeature() == null);
+      // there should be no snapshot feature for the inode, when there is
+      // no snapshot.
+      assertTrue(file.getFileWithSnapshotFeature() == null,
+          "There should be no snapshot feature for this INode.");
     } finally {
       cluster.shutdown();
     }
@@ -717,7 +714,7 @@
 
       // make sure no new checkpoint was done
       long after = fsimage.getStorage().getMostRecentCheckpointTxId();
-      Assert.assertEquals(before, after);
+      Assertions.assertEquals(before, after);
 
       Thread.sleep(1000);
       // do another checkpoint. this time set the timewindow to 1s
@@ -726,7 +723,7 @@
       fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
 
       after = fsimage.getStorage().getMostRecentCheckpointTxId();
-      Assert.assertTrue(after > before);
+      Assertions.assertTrue(after > before);
 
       fs.mkdirs(new Path("/foo/bar/baz")); // 3 new tx
 
@@ -734,11 +731,11 @@
       cluster.getNameNodeRpc().saveNamespace(3600, 5); // 3 + end/start segment
       long after2 = fsimage.getStorage().getMostRecentCheckpointTxId();
       // no checkpoint should be made
-      Assert.assertEquals(after, after2);
+      Assertions.assertEquals(after, after2);
       cluster.getNameNodeRpc().saveNamespace(3600, 3);
       after2 = fsimage.getStorage().getMostRecentCheckpointTxId();
       // a new checkpoint should be done
-      Assert.assertTrue(after2 > after);
+      Assertions.assertTrue(after2 > after);
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java
index 345a21c..29aa3d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java
@@ -24,8 +24,9 @@
 import java.util.List;
 import java.util.Map;
 
-import org.junit.Test;
-import org.junit.Before;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -35,7 +36,6 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
-import org.junit.Assert;
 import org.apache.hadoop.test.GenericTestUtils;
 
 /**
@@ -45,7 +45,7 @@
  */
 public class TestSecondaryNameNodeUpgrade {
 
-  @Before
+  @BeforeEach
   public void cleanupCluster() throws IOException {
     File hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
     System.out.println("cleanupCluster deleting " + hdfsDir);
@@ -114,7 +114,7 @@
   public void testChangeNsIDFails() throws IOException {
     try {
       doIt(ImmutableMap.of("namespaceID", "2"));
-      Assert.fail("Should throw InconsistentFSStateException");
+      Assertions.fail("Should throw InconsistentFSStateException");
     } catch(IOException e) {
       GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e);
       System.out.println("Correctly failed with inconsistent namespaceID: " + e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
index 9473cb3..1ec17fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
@@ -23,10 +23,10 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import javax.management.*;
 import java.io.IOException;
@@ -39,7 +39,7 @@
   private static SecondaryNameNode snn;
   private static final Configuration conf = new Configuration();
   
-  @BeforeClass
+  @BeforeAll
   public static void setUpCluster() throws IOException {
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
         "0.0.0.0:0");
@@ -51,7 +51,7 @@
     snn = new SecondaryNameNode(conf);
   }
   
-  @AfterClass
+  @AfterAll
   public static void shutDownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -72,10 +72,10 @@
 
     String[] checkpointDir = (String[]) mbs.getAttribute(mxbeanName,
             "CheckpointDirectories");
-    Assert.assertArrayEquals(checkpointDir, snn.getCheckpointDirectories());
+    Assertions.assertArrayEquals(checkpointDir, snn.getCheckpointDirectories());
     String[] checkpointEditlogDir = (String[]) mbs.getAttribute(mxbeanName,
             "CheckpointEditlogDirectories");
-    Assert.assertArrayEquals(checkpointEditlogDir,
+    Assertions.assertArrayEquals(checkpointEditlogDir,
             snn.getCheckpointEditlogDirectories());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
index c90a91c..3720918 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
@@ -17,9 +17,8 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -35,9 +34,9 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -146,8 +145,8 @@
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertFalse(securityEnabled);
-      Assert.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
+      Assertions.assertFalse(securityEnabled);
+      Assertions.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
     }
 
     // get attribute "SecurityEnabled" with secure configuration
@@ -162,8 +161,8 @@
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertTrue(securityEnabled);
-      Assert.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
+      Assertions.assertTrue(securityEnabled);
+      Assertions.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java
index d36b9c9..2e79f81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java
@@ -17,10 +17,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -36,9 +33,10 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
+
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test brings up a MiniDFSCluster with 1 NameNode and 0
@@ -57,10 +55,10 @@
 public class TestSecureNameNodeWithExternalKdc {
   final static private int NUM_OF_DATANODES = 0;
 
-  @Before
+  @BeforeEach
   public void testExternalKdcRunning() {
     // Tests are skipped if external KDC is not running.
-    Assume.assumeTrue(isExternalKdcRunning());
+    Assumptions.assumeTrue(isExternalKdcRunning());
   }
 
   @Test
@@ -72,10 +70,10 @@
       String nnSpnegoPrincipal =
         System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
       String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
-      assertNotNull("NameNode principal was not specified", nnPrincipal);
-      assertNotNull("NameNode SPNEGO principal was not specified",
-        nnSpnegoPrincipal);
-      assertNotNull("NameNode keytab was not specified", nnKeyTab);
+      assertNotNull(nnPrincipal, "NameNode principal was not specified");
+      assertNotNull(nnSpnegoPrincipal,
+          "NameNode SPNEGO principal was not specified");
+      assertNotNull(nnKeyTab, "NameNode keytab was not specified");
 
       Configuration conf = new HdfsConfiguration();
       conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
@@ -97,8 +95,8 @@
       // The user specified should not be a superuser
       String userPrincipal = System.getProperty("user.principal");
       String userKeyTab = System.getProperty("user.keytab");
-      assertNotNull("User principal was not specified", userPrincipal);
-      assertNotNull("User keytab was not specified", userKeyTab);
+      assertNotNull(userPrincipal, "User principal was not specified");
+      assertNotNull(userKeyTab, "User keytab was not specified");
 
       UserGroupInformation ugi = UserGroupInformation
           .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
index c43c909..458a09b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -38,8 +38,8 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -164,7 +164,7 @@
         FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);        
         long numEdits = loader.loadFSEdits(
             new EditLogFileInputStream(editFile), 1);
-        assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
+        assertEquals(expectedTransactions, numEdits, "Verification for " + editFile);
       }
     } finally {
       if(fileSys != null) fileSys.close();
@@ -196,8 +196,8 @@
         @Override
         public Void answer(InvocationOnMock invocation) throws Throwable {
           // fsn claims read lock if either read or write locked.
-          Assert.assertTrue(fsnRef.get().hasReadLock());
-          Assert.assertFalse(fsnRef.get().hasWriteLock());
+          Assertions.assertTrue(fsnRef.get().hasReadLock());
+          Assertions.assertFalse(fsnRef.get().hasWriteLock());
           return null;
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index b62a418..9f7e459 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.util.ArrayList;
@@ -34,11 +32,11 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 /** Test snapshot related operations. */
@@ -57,7 +55,7 @@
 
   static private DistributedFileSystem hdfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -71,13 +69,13 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @Before
+  @BeforeEach
   public void reset() throws Exception {
     DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -91,20 +89,20 @@
     final INode before = fsdir.getINode(pathStr);
     
     // Before a directory is snapshottable
-    Assert.assertFalse(before.asDirectory().isSnapshottable());
+    Assertions.assertFalse(before.asDirectory().isSnapshottable());
 
     // After a directory is snapshottable
     final Path path = new Path(pathStr);
     hdfs.allowSnapshot(path);
     {
       final INode after = fsdir.getINode(pathStr);
-      Assert.assertTrue(after.asDirectory().isSnapshottable());
+      Assertions.assertTrue(after.asDirectory().isSnapshottable());
     }
     
     hdfs.disallowSnapshot(path);
     {
       final INode after = fsdir.getINode(pathStr);
-      Assert.assertFalse(after.asDirectory().isSnapshottable());
+      Assertions.assertFalse(after.asDirectory().isSnapshottable());
     }
   }
   
@@ -155,9 +153,9 @@
       assertEquals(components[i], nodesInPath.getPathComponent(i));
     }
 
-    // The last INode should be associated with file1
-    assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath,
-        nodesInPath.getINode(components.length - 1) != null);
+    // The last INode should be associated with file1
+    assertTrue(nodesInPath.getINode(components.length - 1) != null,
+        "file1=" + file1 + ", nodesInPath=" + nodesInPath);
     assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
         file1.toString());
     assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
@@ -244,7 +242,7 @@
       invalidPath = new Path(invalidPath, invalidPathComponent[i]);
       try {
         hdfs.getFileStatus(invalidPath);
-        Assert.fail();
+        Assertions.fail();
       } catch(FileNotFoundException fnfe) {
         System.out.println("The exception is expected: " + fnfe);
       }
@@ -433,7 +431,7 @@
     assertEquals(newNodesInPath.getINode(last).getFullPathName(),
         file1.toString());
     // The modification time of the INode for file3 should have been changed
-    Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
+    Assertions.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
     hdfs.deleteSnapshot(sub1, "s3");
     hdfs.disallowSnapshot(sub1);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 67c8f3c..484fd56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -22,11 +22,8 @@
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -75,9 +72,9 @@
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
@@ -98,7 +95,7 @@
   static final int fileSize = 8192;
   private long editsLength=0, fsimageLength=0;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     ExitUtil.disableSystemExit();
     ExitUtil.resetFirstExitException();
@@ -127,7 +124,7 @@
   /**
    * clean up
    */
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'");
@@ -428,7 +425,7 @@
     } catch (ExitException ee) {
       GenericTestUtils.assertExceptionContains(
           ExitUtil.EXIT_EXCEPTION_MESSAGE, ee);
-      assertTrue("Didn't terminate properly ", ExitUtil.terminateCalled());
+      assertTrue(ExitUtil.terminateCalled(), "Didn't terminate properly ");
     }
   }
 
@@ -657,8 +654,8 @@
         Thread.sleep(HEARTBEAT_INTERVAL * 1000);
         info = nn.getDatanodeReport(DatanodeReportType.LIVE);
       }
-      assertEquals("Number of live nodes should be "+numDatanodes, numDatanodes, 
-          info.length);
+      assertEquals(numDatanodes, info.length,
+          "Number of live nodes should be " + numDatanodes);
       
     } catch (IOException e) {
       fail(StringUtils.stringifyException(e));
@@ -727,19 +724,19 @@
       final Collection<URI> nnDirs = FSNamesystem.getNamespaceDirs(config);
       assertNotNull(nnDirs);
       assertTrue(nnDirs.iterator().hasNext());
-      assertEquals(
-          "NN dir should be created after NN startup.",
-          new File(nnDirStr),
-          new File(nnDirs.iterator().next().getPath()));
+      assertEquals(
+          new File(nnDirStr),
+          new File(nnDirs.iterator().next().getPath()),
+          "NN dir should be created after NN startup.");
       final File nnDir = new File(nnDirStr);
       assertTrue(nnDir.exists());
       assertTrue(nnDir.isDirectory());
 
       try {
-        /* set read only */
-        assertTrue(
-            "Setting NN dir read only should succeed.",
-            FileUtil.setWritable(nnDir, false));
+        /* set read only */
+        assertTrue(
+            FileUtil.setWritable(nnDir, false),
+            "Setting NN dir read only should succeed.");
         cluster.restartNameNodes();
         fail("Restarting NN should fail on read only NN dir.");
       } catch (InconsistentFSStateException e) {
@@ -750,9 +747,9 @@
             containsString(
                 "storage directory does not exist or is not accessible."))));
       } finally {
-        /* set back to writable in order to clean it */
-        assertTrue("Setting NN dir should succeed.",
-            FileUtil.setWritable(nnDir, true));
+        /* set back to writable in order to clean it */
+        assertTrue(FileUtil.setWritable(nnDir, true),
+            "Setting NN dir should succeed.");
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
index ef51acc..aad975c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.net.URI;
 import java.util.Arrays;
@@ -29,9 +29,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -60,7 +60,7 @@
     this.startOpt = startOption;
   }
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
     startOpt.setClusterId(null);
@@ -69,7 +69,7 @@
       Collections.<URI>emptyList());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     conf = null;
     startOpt = null;
@@ -86,8 +86,8 @@
   public void testStartupOptUpgradeFrom204() throws Exception {
     layoutVersion = Feature.RESERVED_REL20_204.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
-    assertTrue("Clusterid should start with CID", storage.getClusterID()
-        .startsWith("CID"));
+    assertTrue(storage.getClusterID().startsWith("CID"),
+        "Clusterid should start with CID");
   }
 
   /**
@@ -102,8 +102,8 @@
     startOpt.setClusterId("cid");
     layoutVersion = Feature.RESERVED_REL22.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
-    assertEquals("Clusterid should match with the given clusterid",
-        "cid", storage.getClusterID());
+    assertEquals("cid", storage.getClusterID(),
+        "Clusterid should match with the given clusterid");
   }
 
   /**
@@ -120,8 +120,8 @@
     storage.setClusterID("currentcid");
     layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
-    assertEquals("Clusterid should match with the existing one",
-        "currentcid", storage.getClusterID());
+    assertEquals("currentcid", storage.getClusterID(),
+        "Clusterid should match with the existing one");
   }
 
   /**
@@ -138,8 +138,8 @@
     storage.setClusterID("currentcid");
     layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
-    assertEquals("Clusterid should match with the existing one",
-        "currentcid", storage.getClusterID());
+    assertEquals("currentcid", storage.getClusterID(),
+        "Clusterid should match with the existing one");
   }
 
   /**
@@ -156,7 +156,7 @@
     storage.setClusterID("currentcid");
     layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
     storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
-    assertEquals("Clusterid should match with the existing one",
-        "currentcid", storage.getClusterID());
+    assertEquals("currentcid", storage.getClusterID(),
+        "Clusterid should match with the existing one");
   }
 }
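
The hunks above all apply the same signature change: JUnit 5's Assertions puts the optional failure message last, where JUnit 4's Assert took it first. A minimal, self-contained sketch of the two forms (a hypothetical test class written for illustration, not taken from this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class MessageOrderSketchTest {
      @Test
      void messageMovesToTheLastArgument() {
        String clusterId = "CID-1234";
        // JUnit 4 form: assertTrue("Clusterid should start with CID", clusterId.startsWith("CID"));
        // JUnit 5 form: the condition or expected/actual pair comes first, the message last.
        assertTrue(clusterId.startsWith("CID"), "Clusterid should start with CID");
        assertEquals("CID-1234", clusterId, "Clusterid should match with the given clusterid");
      }
    }
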
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
index 245602e..ec8cf4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
@@ -18,7 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.mockito.Mockito.*;
 
 import java.io.ByteArrayOutputStream;
@@ -34,8 +35,8 @@
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 
 public class TestStartupProgressServlet {
@@ -46,7 +47,7 @@
   private StartupProgress startupProgress;
   private StartupProgressServlet servlet;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     startupProgress = new StartupProgress();
     ServletContext context = mock(ServletContext.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
index cf04db0..b9cc4e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithHA.java
@@ -29,8 +29,8 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * Tests that StoragePolicySatisfier is able to work with HA enabled.
@@ -95,7 +95,7 @@
         cluster.getNameNode(0).reconfigurePropertyImpl(
             DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
             StoragePolicySatisfierMode.NONE.toString());
-        Assert.fail("It's not allowed to enable or disable"
+        Assertions.fail("It's not allowed to enable or disable"
             + " StoragePolicySatisfier on Standby NameNode");
       } catch (ReconfigurationException e) {
         GenericTestUtils.assertExceptionContains("Could not change property "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java
index 60c9318..68046bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java
@@ -25,8 +25,8 @@
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySummary.StorageTypeAllocation;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 public class TestStoragePolicySummary {
   
@@ -51,13 +51,13 @@
     sts.add(new StorageType[]{StorageType.DISK,
         StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
     Map<String, Long> actualOutput = convertToStringMap(sts);
-    Assert.assertEquals(4,actualOutput.size());
+    Assertions.assertEquals(4,actualOutput.size());
     Map<String, Long>  expectedOutput = new HashMap<>();
     expectedOutput.put("HOT|DISK:1(HOT)", 1l);
     expectedOutput.put("HOT|DISK:2(HOT)", 1l);
     expectedOutput.put("HOT|DISK:3(HOT)", 1l);
     expectedOutput.put("HOT|DISK:4(HOT)", 1l);
-    Assert.assertEquals(expectedOutput,actualOutput);
+    Assertions.assertEquals(expectedOutput,actualOutput);
   }
   
   @Test
@@ -75,13 +75,13 @@
     sts.add(new StorageType[]{StorageType.DISK,
         StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
     Map<String, Long> actualOutput = convertToStringMap(sts);
-    Assert.assertEquals(4,actualOutput.size());
+    Assertions.assertEquals(4,actualOutput.size());
     Map<String, Long> expectedOutput = new HashMap<>();
     expectedOutput.put("HOT|DISK:1(HOT)", 1l);
     expectedOutput.put("HOT|DISK:2(HOT)", 2l);
     expectedOutput.put("HOT|DISK:3(HOT)", 2l);
     expectedOutput.put("HOT|DISK:4(HOT)", 1l);
-    Assert.assertEquals(expectedOutput,actualOutput);
+    Assertions.assertEquals(expectedOutput,actualOutput);
   }
   
   @Test
@@ -110,13 +110,13 @@
     sts.add(new StorageType[]{StorageType.ARCHIVE,
         StorageType.ARCHIVE,StorageType.DISK,StorageType.DISK},warm);
     Map<String, Long> actualOutput = convertToStringMap(sts);
-    Assert.assertEquals(4,actualOutput.size());
+    Assertions.assertEquals(4,actualOutput.size());
     Map<String, Long>  expectedOutput = new HashMap<>();
     expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2l);
     expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3l);
     expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
     expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1l);
-    Assert.assertEquals(expectedOutput,actualOutput);
+    Assertions.assertEquals(expectedOutput,actualOutput);
   }
   
   @Test
@@ -150,7 +150,7 @@
     sts.add(new StorageType[]{StorageType.ARCHIVE,
         StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
     Map<String, Long> actualOutput = convertToStringMap(sts);
-    Assert.assertEquals(9,actualOutput.size());
+    Assertions.assertEquals(9,actualOutput.size());
     Map<String, Long>  expectedOutput = new HashMap<>();
     expectedOutput.put("HOT|DISK:3(HOT)", 2l);
     expectedOutput.put("COLD|DISK:1,ARCHIVE:2(WARM)", 2l);
@@ -161,7 +161,7 @@
     expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 1l);
     expectedOutput.put("COLD|ARCHIVE:3(COLD)", 1l);
     expectedOutput.put("HOT|DISK:1,ARCHIVE:2(WARM)", 1l);
-    Assert.assertEquals(expectedOutput,actualOutput);
+    Assertions.assertEquals(expectedOutput,actualOutput);
   }
   
   @Test
@@ -191,11 +191,11 @@
     sts.add(new StorageType[]{StorageType.ARCHIVE,
         StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
     Map<String, Long> actualOutput = convertToStringMap(sts);
-    Assert.assertEquals(3,actualOutput.size());
+    Assertions.assertEquals(3,actualOutput.size());
     Map<String, Long>  expectedOutput = new LinkedHashMap<>();
     expectedOutput.put("COLD|ARCHIVE:3(COLD)", 4l);
     expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
     expectedOutput.put("HOT|DISK:3(HOT)", 2l);
-    Assert.assertEquals(expectedOutput.toString(),actualOutput.toString());
+    Assertions.assertEquals(expectedOutput.toString(),actualOutput.toString());
   }
 }
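
Where the tests call the assertion class by its qualified name rather than through static imports, the migration in these hunks is a straight rename from org.junit.Assert to org.junit.jupiter.api.Assertions. A small illustrative sketch (hypothetical class, not part of the patch):

    import java.util.HashMap;
    import java.util.Map;

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Test;

    class QualifiedAssertionsSketchTest {
      @Test
      void qualifiedCallsAfterTheRename() {
        Map<String, Long> expected = new HashMap<>();
        expected.put("HOT|DISK:1(HOT)", 1L);
        Map<String, Long> actual = new HashMap<>(expected);
        // Formerly Assert.assertEquals(...); only the class name changes.
        Assertions.assertEquals(expected, actual);
        if (actual.isEmpty()) {
          // Formerly Assert.fail(...).
          Assertions.fail("actual map should not be empty");
        }
      }
    }
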
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
index 19f1ca9..9599c3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
@@ -21,9 +21,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -49,10 +47,10 @@
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.util.Shell;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
+
 /**
  * Startup and checkpoint tests
  * 
@@ -69,7 +67,7 @@
   static final int fileSize = 8192;
   private File path1, path2, path3;
   private MiniDFSCluster cluster;  
-  @Before
+  @BeforeEach
   public void setUpNameDirs() throws Exception {
     config = new HdfsConfiguration();
     hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
@@ -199,17 +197,17 @@
     FSImageTestUtil.assertFileContentsSame(
         new File(path1, "current/" + getImageFileName(4)),
         new File(path2, "current/" + getImageFileName(4)));
-    assertFalse("Should not have any image in an edits-only directory",
-        new File(path3, "current/" + getImageFileName(4)).exists());
+    assertFalse(new File(path3, "current/" + getImageFileName(4)).exists(),
+        "Should not have any image in an edits-only directory");
 
-    // Should have finalized logs in the directory that didn't fail
-    assertTrue("Should have finalized logs in the directory that didn't fail",
-        new File(path1, "current/" + getFinalizedEditsFileName(1,4)).exists());
-    // Should not have finalized logs in the failed directories
-    assertFalse("Should not have finalized logs in the failed directories",
-        new File(path2, "current/" + getFinalizedEditsFileName(1,4)).exists());
-    assertFalse("Should not have finalized logs in the failed directories",
-        new File(path3, "current/" + getFinalizedEditsFileName(1,4)).exists());
+    // Should have finalized logs in the directory that didn't fail
+    assertTrue(new File(path1, "current/" + getFinalizedEditsFileName(1, 4)).exists(),
+        "Should have finalized logs in the directory that didn't fail");
+    // Should not have finalized logs in the failed directories
+    assertFalse(new File(path2, "current/" + getFinalizedEditsFileName(1, 4)).exists(),
+        "Should not have finalized logs in the failed directories");
+    assertFalse(new File(path3, "current/" + getFinalizedEditsFileName(1, 4)).exists(),
+        "Should not have finalized logs in the failed directories");
     
     // The new log segment should be in all of the directories.
     FSImageTestUtil.assertFileContentsSame(
@@ -280,19 +278,19 @@
 
       executor.executeCommand(cmd);
       restore = fsi.getStorage().getRestoreFailedStorage();
-      assertFalse("After set true call restore is " + restore, restore);
+      assertFalse(restore, "After set true call restore is " + restore);
 
       // run one more time - to set it to true again
       cmd = "-fs NAMENODE -restoreFailedStorage true";
       executor.executeCommand(cmd);
       restore = fsi.getStorage().getRestoreFailedStorage();
-      assertTrue("After set false call restore is " + restore, restore);
+      assertTrue(restore, "After set false call restore is " + restore);
       
       // run one more time - no change in value
       cmd = "-fs NAMENODE -restoreFailedStorage check";
       CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
       restore = fsi.getStorage().getRestoreFailedStorage();
-      assertTrue("After check call restore is " + restore, restore);
+      assertTrue(restore, "After check call restore is " + restore);
       String commandOutput = cmdResult.getCommandOutput();
       commandOutput.trim();
       assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
@@ -343,18 +341,18 @@
       secondary.doCheckpoint();
       
       printStorages(fsImage);
-      
-      // The created file should still exist in the in-memory FS state after the
-      // checkpoint.
-      assertTrue("path exists before restart", fs.exists(testPath));
+
+      // The created file should still exist in the in-memory FS state after the
+      // checkpoint.
+      assertTrue(fs.exists(testPath), "path exists before restart");
       
       secondary.shutdown();
       
       // Restart the NN so it reloads the edits from on-disk.
       cluster.restartNameNode();
-  
-      // The created file should still exist after the restart.
-      assertTrue("path should still exist after restart", fs.exists(testPath));
+
+      // The created file should still exist after the restart.
+      assertTrue(fs.exists(testPath), "path should still exist after restart");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 39e272a..3446abf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -43,10 +43,10 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 
@@ -54,10 +54,7 @@
 
 import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
 import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * This class tests INodeFile with striped feature.
@@ -89,7 +86,7 @@
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  @Before
+  @BeforeEach
   public void init() throws IOException {
     Configuration conf = new HdfsConfiguration();
     ErasureCodingPolicyManager.getInstance().init(conf);
@@ -169,8 +166,8 @@
         null, perm, 0L, 0L, null, null /*replication*/, ecPolicyID,
         1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
 
-    Assert.assertTrue(inodeFile.isStriped());
-    Assert.assertEquals(ecPolicyID.byteValue(),
+    Assertions.assertTrue(inodeFile.isStriped());
+    Assertions.assertEquals(ecPolicyID.byteValue(),
         inodeFile.getErasureCodingPolicyID());
   }
 
@@ -339,37 +336,37 @@
       // Case-1: Verify the behavior of striped blocks
       // Get blocks of striped file
       INode inodeStriped = fsd.getINode("/parentDir/ecDir/ecFile");
-      assertTrue("Failed to get INodeFile for /parentDir/ecDir/ecFile",
-          inodeStriped instanceof INodeFile);
+      assertTrue(inodeStriped instanceof INodeFile,
+          "Failed to get INodeFile for /parentDir/ecDir/ecFile");
       INodeFile inodeStripedFile = (INodeFile) inodeStriped;
       BlockInfo[] stripedBlks = inodeStripedFile.getBlocks();
       for (BlockInfo blockInfo : stripedBlks) {
-        assertFalse("Mistakenly marked the block as deleted!",
-            blockInfo.isDeleted());
+        assertFalse(blockInfo.isDeleted(),
+            "Mistakenly marked the block as deleted!");
       }
 
       // delete directory with erasure coding policy
       dfs.delete(ecDir, true);
       for (BlockInfo blockInfo : stripedBlks) {
-        assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted());
+        assertTrue(blockInfo.isDeleted(), "Didn't mark the block as deleted!");
       }
 
       // Case-2: Verify the behavior of contiguous blocks
       // Get blocks of contiguous file
       INode inode = fsd.getINode("/parentDir/someFile");
-      assertTrue("Failed to get INodeFile for /parentDir/someFile",
-          inode instanceof INodeFile);
+      assertTrue(inode instanceof INodeFile,
+          "Failed to get INodeFile for /parentDir/someFile");
       INodeFile inodeFile = (INodeFile) inode;
       BlockInfo[] contiguousBlks = inodeFile.getBlocks();
       for (BlockInfo blockInfo : contiguousBlks) {
-        assertFalse("Mistakenly marked the block as deleted!",
-            blockInfo.isDeleted());
+        assertFalse(blockInfo.isDeleted(),
+            "Mistakenly marked the block as deleted!");
       }
 
       // delete parent directory
       dfs.delete(parentDir, true);
       for (BlockInfo blockInfo : contiguousBlks) {
-        assertTrue("Didn't mark the block as deleted!", blockInfo.isDeleted());
+        assertTrue(blockInfo.isDeleted(), "Didn't mark the block as deleted!");
       }
     } finally {
       if (cluster != null) {
@@ -446,7 +443,7 @@
           fileLen);
       for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
         for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.DISK, type);
+          Assertions.assertEquals(StorageType.DISK, type);
         }
       }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
index 1ec08e4..18696a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -78,9 +76,9 @@
       fail("Didn't get an exception!");
     } catch (IOException ioe) {
       Mockito.verify(mockStorage).reportErrorOnFile(localPath.get(0));
-      assertTrue(
-          "Unexpected exception: " + StringUtils.stringifyException(ioe),
-          ioe.getMessage().contains("Unable to download to any storage"));
+      assertTrue(
+          ioe.getMessage().contains("Unable to download to any storage"),
+          "Unexpected exception: " + StringUtils.stringifyException(ioe));
     } finally {
       cluster.shutdown();      
     }
@@ -110,8 +108,8 @@
 
       TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);      
       Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0));
-      assertTrue("The valid local file should get saved properly",
-          localPaths.get(1).length() > 0);
+      assertTrue(localPaths.get(1).length() > 0,
+          "The valid local file should get saved properly");
     } finally {
       cluster.shutdown();      
     }
@@ -134,7 +132,7 @@
             null, false);
         fail("TransferImage Should fail with timeout");
       } catch (SocketTimeoutException e) {
-        assertEquals("Read should timeout", "Read timed out", e.getMessage());
+        assertEquals("Read timed out", e.getMessage(), "Read should timeout");
       }
     } finally {
       if (testServer != null) {
@@ -177,7 +175,7 @@
             NameNodeFile.IMAGE, 1L);
         fail("TransferImage Should fail with timeout");
       } catch (SocketTimeoutException e) {
-        assertEquals("Upload should timeout", "Read timed out", e.getMessage());
+        assertEquals("Read timed out", e.getMessage(), "Upload should timeout");
       }
     } finally {
       testServer.stop();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index 06b57f4..8a1fe3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -28,8 +28,8 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
 
@@ -62,18 +62,18 @@
     // be -block + (block - 0.5 block) = -0.5 block
     QuotaCounts count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
-    Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
+    Assertions.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
 
     // case 2: truncate to 1 block
     count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count);
-    Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION,
+    Assertions.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION,
                         count.getStorageSpace());
 
     // case 3: truncate to 0
     count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(0, null, count);
-    Assert.assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION,
+    Assertions.assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION,
                         count.getStorageSpace());
   }
 
@@ -87,17 +87,17 @@
     // diff should be +BLOCKSIZE
     QuotaCounts count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
-    Assert.assertEquals(BLOCKSIZE * REPLICATION, count.getStorageSpace());
+    Assertions.assertEquals(BLOCKSIZE * REPLICATION, count.getStorageSpace());
 
     // case 2: truncate to 1 block
     count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count);
-    Assert.assertEquals(0, count.getStorageSpace());
+    Assertions.assertEquals(0, count.getStorageSpace());
 
     // case 3: truncate to 0
     count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(0, null, count);
-    Assert.assertEquals(0, count.getStorageSpace());
+    Assertions.assertEquals(0, count.getStorageSpace());
   }
 
   @Test
@@ -116,19 +116,19 @@
     // as case 1
     QuotaCounts count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
-    Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
+    Assertions.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
 
     // case 8: truncate to 2 blocks
     // the original 2.5 blocks are in snapshot. the block truncated is not
     // in snapshot. diff should be -0.5 block
     count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
-    Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
+    Assertions.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
 
     // case 9: truncate to 0
     count = new QuotaCounts.Builder().build();
     file.computeQuotaDeltaForTruncate(0, null, count);
-    Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count
+    Assertions.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count
         .getStorageSpace());
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
index 0421941..ef3269d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
@@ -41,10 +41,10 @@
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * End-to-end test case for upgrade domain
@@ -70,7 +70,7 @@
   private MiniDFSCluster cluster = null;
   private HostsFileWriter hostsFileWriter = new HostsFileWriter();
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     StaticMapping.resetMap();
     Configuration conf = new HdfsConfiguration();
@@ -89,7 +89,7 @@
     refreshDatanodeAdminProperties();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     hostsFileWriter.cleanup();
     if (cluster != null) {
@@ -203,7 +203,7 @@
         }
       }
       for (DatanodeID datanodeID : expectedDatanodeIDs) {
-        Assert.assertTrue(locs.contains(datanodeID));
+        Assertions.assertTrue(locs.contains(datanodeID));
       }
     }
   }
@@ -256,7 +256,7 @@
           cluster.getNamesystem().getBlockManager()
               .getBlockPlacementPolicy()
               .verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
-      Assert.assertTrue(status.isPlacementPolicySatisfied());
+      Assertions.assertTrue(status.isPlacementPolicySatisfied());
     }
   }
 }
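
The setup/teardown changes in this and the surrounding files follow one mapping: @Before becomes @BeforeEach, @After becomes @AfterEach, and the class-level @BeforeClass/@AfterClass become @BeforeAll/@AfterAll, which JUnit 5 still requires to be static by default. A minimal sketch of the JUnit 5 lifecycle names (hypothetical class, for reference only, not taken from the patch):

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleSketchTest {
      @BeforeAll
      static void startSharedFixture() {
        // Runs once before all tests in the class; must be static by default.
      }

      @BeforeEach
      void setUp() {
        // Runs before every @Test, like JUnit 4's @Before.
      }

      @Test
      void exampleTest() {
      }

      @AfterEach
      void tearDown() {
        // Runs after every @Test, like JUnit 4's @After.
      }

      @AfterAll
      static void stopSharedFixture() {
        // Runs once after all tests in the class.
      }
    }
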
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 0cf1fed..51b0506 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -25,8 +25,8 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 
 import java.io.File;
 import java.io.IOException;
@@ -39,7 +39,7 @@
  */
 public class TestValidateConfigurationSettings {
 
-  @After
+  @AfterEach
   public void cleanUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
index c527837..b8b6a1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
@@ -27,9 +27,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 /**
@@ -46,7 +46,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
index 5b0922d..a68fd7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
@@ -23,9 +23,10 @@
 
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestXAttrFeature {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java
index 41db943..76a83f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapAliasmap.java
@@ -30,15 +30,15 @@
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.net.URL;
 import java.util.Optional;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
  * Test for aliasmap bootstrap.
@@ -47,7 +47,7 @@
 
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration conf = new Configuration();
     MiniDFSCluster.setupNamenodeProvidedConfiguration(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 0e83bec..9140103 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -44,10 +42,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
 public class TestBootstrapStandby {
@@ -60,7 +57,7 @@
   private MiniDFSCluster cluster;
   private NameNode nn0;
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws IOException {
     Configuration conf = new Configuration();
 
@@ -87,7 +84,7 @@
     }
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java
index 1e6dae7..7c3c7c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,10 +34,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
 /**
@@ -64,7 +63,7 @@
     return conf;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration conf = createConfig();
 
@@ -82,7 +81,7 @@
     dfs.close();
   }
   
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 18f987d..e5b4cc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -20,9 +20,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -54,11 +52,11 @@
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -77,7 +75,7 @@
 
   private final Path testPath= new Path("/TestConsistentReadsObserver");
 
-  @BeforeClass
+  @BeforeAll
   public static void startUpCluster() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
@@ -89,17 +87,17 @@
     dfsCluster = qjmhaCluster.getDfsCluster();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     dfs = setObserverRead(true);
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     dfs.delete(testPath, true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutDownCluster() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
@@ -388,9 +386,9 @@
       fail("listStatus should have thrown exception");
     } catch (RemoteException re) {
       IOException e = re.unwrapRemoteException();
-      assertTrue("should have thrown StandbyException but got "
-              + e.getClass().getSimpleName(),
-          e instanceof StandbyException);
+      assertTrue(e instanceof StandbyException,
+          "should have thrown StandbyException but got "
+              + e.getClass().getSimpleName());
     }
   }
 
@@ -399,9 +397,9 @@
     NameNode nn0 = dfsCluster.getNameNode(0);
     NameNode nn2 = dfsCluster.getNameNode(2);
     HAServiceStatus st = nn0.getRpcServer().getServiceStatus();
-    assertEquals("nn0 is not active", HAServiceState.ACTIVE, st.getState());
+    assertEquals(HAServiceState.ACTIVE, st.getState(), "nn0 is not active");
     st = nn2.getRpcServer().getServiceStatus();
-    assertEquals("nn2 is not observer", HAServiceState.OBSERVER, st.getState());
+    assertEquals(HAServiceState.OBSERVER, st.getState(), "nn2 is not observer");
 
     FileContext fc = FileContext.getFileContext(conf);
     // initialize observer proxy for FileContext
@@ -474,8 +472,8 @@
   }
 
   private void assertSentTo(int nnIdx) throws IOException {
-    assertTrue("Request was not sent to the expected namenode " + nnIdx,
-        HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx));
+    assertTrue(HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx),
+        "Request was not sent to the expected namenode " + nnIdx);
   }
 
   private DistributedFileSystem setObserverRead(boolean flag) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
index 6ad237b..15952ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -50,9 +47,8 @@
 import org.apache.hadoop.hdfs.util.PersistentLongFile;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
 /**
@@ -65,7 +61,7 @@
   
   private Configuration conf;
   
-  @Before
+  @BeforeEach
   public void createConfiguration() {
     conf = new HdfsConfiguration();
     // Turn off persistent IPC, so that the DFSClient can survive NN restart
@@ -121,8 +117,8 @@
           PersistentLongFile prevLongFile = new PersistentLongFile(prevFile, -10);
           PersistentLongFile currLongFile = new PersistentLongFile(new File(currDir,
               fileName), -11);
-          assertTrue("Value in " + fileName + " has decreased on upgrade in "
-              + journalDir, prevLongFile.get() <= currLongFile.get());
+          assertTrue(prevLongFile.get() <= currLongFile.get(), "Value in "
+              + fileName + " has decreased on upgrade in " + journalDir);
         }
       }
     }
@@ -132,9 +128,9 @@
       boolean shouldExist) {
     File previousDir = new File(rootDir, "previous");
     if (shouldExist) {
+      assertTrue(previousDir.exists(), previousDir + " does not exist");
+        assertTrue(previousDir.exists(), previousDir + " does not exist");
     } else {
+      assertFalse(previousDir.exists(), previousDir + " does exist");
+        assertFalse(previousDir.exists(), previousDir + " does exist");
     }
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 44c3984..c6fc363 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 
@@ -57,9 +57,9 @@
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.slf4j.Logger;
@@ -83,7 +83,7 @@
     DFSTestUtil.setNameNodeLogLevel(Level.TRACE);
   }
   
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
@@ -112,7 +112,7 @@
     fs = HATestUtil.configureFailoverFs(cluster, conf);
   }
   
-  @After
+  @AfterEach
   public void shutdownCluster() throws Exception {
     if (cluster != null) {
       banner("Shutting down cluster. NN1 metadata:");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
index 3f86d45..2f4916b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
@@ -32,8 +32,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 import java.util.function.Supplier;
@@ -86,7 +86,7 @@
             try {
               cluster.waitActive();
               BlockLocation[] blocks = fs.getFileBlockLocations(path, 0, 10);
-              Assert.assertEquals(1, blocks.length);
+              Assertions.assertEquals(1, blocks.length);
               return blocks[0].getHosts().length == replicas;
             } catch (IOException e) {
               throw new RuntimeException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 2ef48a3..439122a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -45,9 +45,9 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.io.ByteArrayInputStream;
@@ -60,7 +60,7 @@
 import java.util.HashSet;
 
 import static org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider.OBSERVER_PROBE_RETRY_PERIOD_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test case for client support of delegation tokens in an HA cluster.
@@ -79,7 +79,7 @@
 
   private volatile boolean catchup = false;
   
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     SecurityUtilTestHelper.setTokenServiceUseIp(true);
     
@@ -107,7 +107,7 @@
         nn0.getNamesystem());
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
index 2b42adc..92ff0af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -41,11 +41,10 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo;
 
-import org.junit.Test;
-
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index ff90121..b30b8bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -45,9 +43,9 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -61,7 +59,7 @@
   private Configuration conf;
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void startCluster() throws IOException {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
@@ -73,7 +71,7 @@
         .build();
   }
   
-  @After
+  @AfterEach
   public void shutDownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -110,8 +108,8 @@
         }
         int first = btsms[i].getSerialNoForTesting();
         int second = btsms[j].getSerialNoForTesting();
-        assertFalse("Overlap found for set serial number (" + serialNumber + ") is " + i + ": "
-            + first + " == " + j + ": " + second, first == second);
+        assertFalse(first == second, "Overlap found for set serial number ("
+            + serialNumber + ") is " + i + ": " + first + " == " + j + ": " + second);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
index 0a0f8eb..7d36785 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureOfSharedDir.java
@@ -19,10 +19,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -43,8 +40,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
 public class TestFailureOfSharedDir {
@@ -65,9 +61,9 @@
         bar));
     conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, bar.toString());
     Collection<URI> requiredEditsDirs = FSNamesystem
-        .getRequiredNamespaceEditsDirs(conf); 
-    assertTrue(Joiner.on(",").join(requiredEditsDirs) + " does not contain " + bar,
-        requiredEditsDirs.contains(bar));
+        .getRequiredNamespaceEditsDirs(conf);
+    assertTrue(requiredEditsDirs.contains(bar),
+        Joiner.on(",").join(requiredEditsDirs) + " does not contain " + bar);
   }
 
   /**
@@ -114,11 +110,11 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
         Joiner.on(",").join(localC, localB, localA));
     List<URI> dirs = FSNamesystem.getNamespaceEditsDirs(conf);
-    assertEquals(
-        "Shared dirs should come first, then local dirs, in the order " +
-        "they were listed in the configuration.",
-        Joiner.on(",").join(sharedA, localC, localB, localA),
-        Joiner.on(",").join(dirs));
+    assertEquals(
+        Joiner.on(",").join(sharedA, localC, localB, localA),
+        Joiner.on(",").join(dirs),
+        "Shared dirs should come first, then local dirs, in the order " +
+            "they were listed in the configuration.");
   }
   
   /**
@@ -158,9 +154,9 @@
 
       NameNode nn1 = cluster.getNameNode(1);
       assertTrue(nn1.isStandbyState());
-      assertFalse(
-          "StandBy NameNode should not go to SafeMode on resource unavailability",
-          nn1.isInSafeMode());
+      assertFalse(
+          nn1.isInSafeMode(),
+          "StandBy NameNode should not go to SafeMode on resource unavailability");
 
       NameNode nn0 = cluster.getNameNode(0);
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 31fcb14..481ca7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -52,9 +50,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
@@ -110,7 +108,7 @@
     this.useAsyncEditLogging = useAsyncEditLogging;
   }
 
-  @Before
+  @BeforeEach
   public void setUpCluster() throws Exception {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
@@ -161,7 +159,7 @@
     fs = HATestUtil.configureFailoverFs(cluster, conf);
   }
   
-  @After
+  @AfterEach
   public void tearDownCluster() throws Exception {
     if (fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestGetGroupsWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestGetGroupsWithHA.java
index c3c2275..d2f6ef75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestGetGroupsWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestGetGroupsWithHA.java
@@ -26,14 +26,14 @@
 import org.apache.hadoop.hdfs.tools.GetGroups;
 import org.apache.hadoop.tools.GetGroupsTestBase;
 import org.apache.hadoop.util.Tool;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestGetGroupsWithHA extends GetGroupsTestBase {
   
   private MiniDFSCluster cluster;
   
-  @Before
+  @BeforeEach
   public void setUpNameNode() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -42,7 +42,7 @@
     HATestUtil.setFailoverConfigurations(cluster, conf);
   }
   
-  @After
+  @AfterEach
   public void tearDownNameNode() {
     if (cluster != null) {
       cluster.shutdown();
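
The same lifecycle mapping recurs in most of the files below: JUnit 4's @Before and @After become the per-method @BeforeEach and @AfterEach. A minimal sketch of the Jupiter lifecycle (class and field names are illustrative):

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    // Illustrative only; not part of this patch.
    class LifecycleSketch {
      private StringBuilder state;

      @BeforeEach   // runs before every @Test, like JUnit 4's @Before
      void setUp() {
        state = new StringBuilder("ready");
      }

      @Test
      void usesFreshState() {
        state.append('!');
      }

      @AfterEach    // runs after every @Test, like JUnit 4's @After
      void tearDown() {
        state = null;
      }
    }
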
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
index ca2c98d..761a790 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
@@ -33,7 +33,7 @@
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHAAppend {
   static final int COUNT = 5;
@@ -113,9 +113,9 @@
       int rc = ToolRunner.run(new DFSck(cluster.getConfiguration(1)),
           new String[] { "/", "-files", "-blocks" });
       assertEquals(0, rc);
-      
-      assertEquals("CorruptBlocks should be empty.", 0, cluster.getNameNode(1)
-          .getNamesystem().getCorruptReplicaBlocks());
+
+      assertEquals(0, cluster.getNameNode(1).getNamesystem()
+          .getCorruptReplicaBlocks(), "CorruptBlocks should be empty.");
 
       AppendTestUtil.checkFullFile(fs, fileToAppend, data.length, data,
           fileToAppend.toString());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
index 969d315..21f689e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java
@@ -19,10 +19,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.MalformedURLException;
@@ -32,6 +29,7 @@
 import java.util.List;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -40,7 +38,6 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
 import org.mockito.Mockito;
 
 /**
@@ -107,8 +104,8 @@
     NameNode.initializeGenericKeys(conf, "ns1", "nn1");
 
     checkpointer = new StandbyCheckpointer(conf, fsn);
-    assertEquals("Got an unexpected number of possible active NNs", 2, checkpointer
-        .getActiveNNAddresses().size());
+    assertEquals(2, checkpointer.getActiveNNAddresses().size(),
+        "Got an unexpected number of possible active NNs");
     assertEquals(new URL("http", "1.2.3.2", DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""),
         checkpointer.getActiveNNAddresses().get(0));
     assertAddressMatches("1.2.3.2", checkpointer.getActiveNNAddresses().get(0));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
index 46ebb8f..bcd432e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -32,8 +32,8 @@
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
-import org.junit.Test;
 
 public class TestHAFsck {
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
index 8beba74..b846c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java
@@ -36,8 +36,8 @@
 import javax.management.ObjectName;
 import java.lang.management.ManagementFactory;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Make sure HA-related metrics are updated and reported appropriately.
@@ -74,7 +74,7 @@
           new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
       final Long ltt1 =
           (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
-      assertTrue("lastHATransitionTime should be > 0", ltt1 > 0);
+      assertTrue(ltt1 > 0, "lastHATransitionTime should be > 0");
       
       assertEquals("active", nn0.getHAState());
       assertEquals(0, nn0.getMillisSinceLastLoadedEdits());
@@ -84,7 +84,7 @@
       cluster.transitionToStandby(0);
       final Long ltt2 =
           (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
-      assertTrue("lastHATransitionTime should be > " + ltt1, ltt2 > ltt1);
+      assertTrue(ltt2 > ltt1, "lastHATransitionTime should be > " + ltt1);
       cluster.transitionToActive(1);
       
       assertEquals("standby", nn0.getHAState());
@@ -112,11 +112,11 @@
       assertEquals(0, nn0.getPendingDataNodeMessageCount());
       assertEquals(0, nn1.getPendingDataNodeMessageCount());
       long newMillisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
-      // Since we just waited for the standby to catch up, the time since we
-      // last loaded edits should be very low.
-      assertTrue("expected " + millisSinceLastLoadedEdits + " > " +
-          newMillisSinceLastLoadedEdits,
-          millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
+      // Since we just waited for the standby to catch up, the time since we
+      // last loaded edits should be very low.
+      assertTrue(millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits,
+          "expected " + millisSinceLastLoadedEdits + " > " +
+              newMillisSinceLastLoadedEdits);
     } finally {
       IOUtils.cleanupWithLogger(LOG, fs);
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index 537e6a3..823ef4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -69,9 +66,9 @@
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.util.function.Supplier;
@@ -93,7 +90,7 @@
     GenericTestUtils.setLogLevel(FSImage.LOG, Level.TRACE);
   }
   
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -115,7 +112,7 @@
     cluster.transitionToActive(0);
   }
   
-  @After
+  @AfterEach
   public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -155,10 +152,10 @@
         }
       }
     }.start();
-    
-    // make sure the client's call has actually been handled by the active NN
-    assertFalse("The directory should not be created while NN in safemode",
-        fs.exists(test));
+
+    // make sure the client's call has actually been handled by the active NN
+    assertFalse(fs.exists(test),
+        "The directory should not be created while NN in safemode");
     
     Thread.sleep(1000);
     // let nn0 leave safemode
@@ -203,14 +200,14 @@
 
     FSNamesystem namesystem = nn0.getNamesystem();
     String status = namesystem.getSafemode();
-    assertTrue("Bad safemode status: '" + status + "'", status
-        .startsWith("Safe mode is ON."));
+    assertTrue(status.startsWith("Safe mode is ON."),
+        "Bad safemode status: '" + status + "'");
     NameNodeAdapter.enterSafeMode(nn0, false);
-    assertTrue("Failed to enter into safemode in active", namesystem
-        .isInSafeMode());
+    assertTrue(namesystem.isInSafeMode(),
+        "Failed to enter into safemode in active");
     NameNodeAdapter.enterSafeMode(nn0, false);
-    assertTrue("Failed to enter into safemode in active", namesystem
-        .isInSafeMode());
+    assertTrue(namesystem.isInSafeMode(),
+        "Failed to enter into safemode in active");
   }
 
   /**
@@ -234,14 +231,14 @@
     restartStandby();
     FSNamesystem namesystem = nn1.getNamesystem();
     String status = namesystem.getSafemode();
-    assertTrue("Bad safemode status: '" + status + "'", status
-        .startsWith("Safe mode is ON."));
+    assertTrue(status.startsWith("Safe mode is ON."),
+        "Bad safemode status: '" + status + "'");
     NameNodeAdapter.enterSafeMode(nn1, false);
-    assertTrue("Failed to enter into safemode in standby", namesystem
-        .isInSafeMode());
+    assertTrue(namesystem.isInSafeMode(),
+        "Failed to enter into safemode in standby");
     NameNodeAdapter.enterSafeMode(nn1, false);
-    assertTrue("Failed to enter into safemode in standby", namesystem
-        .isInSafeMode());
+    assertTrue(namesystem.isInSafeMode(),
+        "Failed to enter into safemode in standby");
   }
 
   private void restartActive() throws IOException {
@@ -500,37 +497,37 @@
     int numNodes, int nodeThresh) {
     String status = nn.getNamesystem().getSafemode();
     if (total == 0 && nodeThresh == 0) {
-      assertTrue("Bad safemode status: '" + status + "'",
-          status.isEmpty()
-              || status.startsWith("Safe mode is ON. The reported blocks 0 " +
-              "has reached the threshold 0.9990 of total blocks 0. The " +
-              "minimum number of live datanodes is not required. In safe " +
-              "mode extension. Safe mode will be turned off automatically " +
-              "in 0 seconds."));
+      assertTrue(
+          status.isEmpty()
+              || status.startsWith("Safe mode is ON. The reported blocks 0 " +
+              "has reached the threshold 0.9990 of total blocks 0. The " +
+              "minimum number of live datanodes is not required. In safe " +
+              "mode extension. Safe mode will be turned off automatically " +
+              "in 0 seconds."), "Bad safemode status: '" + status + "'");
     } else if (safe == total) {
       if (nodeThresh == 0) {
-        assertTrue("Bad safemode status: '" + status + "'",
-            status.startsWith("Safe mode is ON. The reported blocks " + safe
-                + " has reached the " + "threshold 0.9990 of total blocks "
-                + total + ". The minimum number of live datanodes is not "
-                + "required. In safe mode extension. Safe mode will be turned "
-                + "off automatically"));
+        assertTrue(
+            status.startsWith("Safe mode is ON. The reported blocks " + safe
+                + " has reached the " + "threshold 0.9990 of total blocks "
+                + total + ". The minimum number of live datanodes is not "
+                + "required. In safe mode extension. Safe mode will be turned "
+                + "off automatically"), "Bad safemode status: '" + status + "'");
       } else {
-        assertTrue("Bad safemode status: '" + status + "'",
-            status.startsWith(
-                "Safe mode is ON. The reported blocks " + safe + " has reached "
-                    + "the threshold 0.9990 of total blocks " + total + ". The "
-                    + "number of live datanodes " + numNodes + " has reached "
-                    + "the minimum number " + nodeThresh + ". In safe mode "
-                    + "extension. Safe mode will be turned off automatically"));
+        assertTrue(status.startsWith(
+            "Safe mode is ON. The reported blocks " + safe + " has reached "
+                + "the threshold 0.9990 of total blocks " + total + ". The "
+                + "number of live datanodes " + numNodes + " has reached "
+                + "the minimum number " + nodeThresh + ". In safe mode "
+                + "extension. Safe mode will be turned off automatically"),
+            "Bad safemode status: '" + status + "'");
       }
     } else {
       int additional = (int) (total * 0.9990) - safe;
-      assertTrue("Bad safemode status: '" + status + "'",
-          status.startsWith(
-              "Safe mode is ON. " +
-              "The reported blocks " + safe + " needs additional " +
-              additional + " blocks"));
+      assertTrue(status.startsWith(
+          "Safe mode is ON. " +
+          "The reported blocks " + safe + " needs additional " +
+          additional + " blocks"),
+          "Bad safemode status: '" + status + "'");
     }
   }
 
@@ -589,12 +586,12 @@
     
     // It will initially have all of the blocks necessary.
     String status = nn1.getNamesystem().getSafemode();
-    assertTrue("Bad safemode status: '" + status + "'",
-      status.startsWith(
-        "Safe mode is ON. The reported blocks 10 has reached the threshold "
-        + "0.9990 of total blocks 10. The minimum number of live datanodes is "
-        + "not required. In safe mode extension. Safe mode will be turned off "
-        + "automatically"));
+    assertTrue(status.startsWith(
+        "Safe mode is ON. The reported blocks 10 has reached the threshold "
+            + "0.9990 of total blocks 10. The minimum number of live datanodes is "
+            + "not required. In safe mode extension. Safe mode will be turned off "
+            + "automatically"),
+        "Bad safemode status: '" + status + "'");
 
     // Delete those blocks while the SBN is in safe mode.
     // Immediately roll the edit log before the actual deletions are sent
@@ -788,7 +785,7 @@
   public void testIsInSafemode() throws Exception {
     // Check for the standby nn without client failover.
     NameNode nn2 = cluster.getNameNode(1);
-    assertTrue("nn2 should be in standby state", nn2.isStandbyState());
+    assertTrue(nn2.isStandbyState(), "nn2 should be in standby state");
 
     InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
     Configuration conf = new Configuration();
@@ -801,11 +798,11 @@
       fail("StandBy should throw exception for isInSafeMode");
     } catch (IOException e) {
       if (e instanceof RemoteException) {
-        assertEquals("RPC Error code should indicate app failure.", RpcErrorCodeProto.ERROR_APPLICATION,
-            ((RemoteException) e).getErrorCode());
+        assertEquals(RpcErrorCodeProto.ERROR_APPLICATION, ((RemoteException) e)
+            .getErrorCode(), "RPC Error code should indicate app failure.");
         IOException sbExcpetion = ((RemoteException) e).unwrapRemoteException();
-        assertTrue("StandBy nn should not support isInSafeMode",
-            sbExcpetion instanceof StandbyException);
+        assertTrue(sbExcpetion instanceof StandbyException,
+            "StandBy nn should not support isInSafeMode");
       } else {
         throw e;
       }
@@ -820,10 +817,10 @@
     cluster.transitionToActive(1);
     cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
     DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
-    assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());
+    assertTrue(dfsWithFailOver.isInSafeMode(), "ANN should be in SafeMode");
 
     cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
-    assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
+    assertFalse(dfsWithFailOver.isInSafeMode(), "ANN should be out of SafeMode");
   }
 
   /** Test NN crash and client crash/stuck immediately after block allocation */
@@ -870,7 +867,7 @@
       FSDataInputStream is = dfs.open(filePath);
       is.close();
       dfs.recoverLease(filePath);// initiate recovery
-      assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
+      assertTrue(dfs.recoverLease(filePath), "Recovery also should be success");
     } finally {
       cluster.shutdown();
     }
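
The try/fail/catch blocks kept in TestHASafeMode above still compile and run under Jupiter; assertThrows is an alternative idiom that this patch does not adopt. A sketch of that alternative, using a stand-in call that is expected to throw IOException:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    // Illustrative only; not part of this patch.
    class AssertThrowsSketch {
      @Test
      void rejectedCallSurfacesAsIOException() {
        IOException e = assertThrows(IOException.class, () -> {
          // Stand-in for an RPC that should fail, e.g. isInSafeMode() on a standby.
          throw new IOException("operation not supported in standby state");
        });
        assertTrue(e.getMessage().contains("standby"));
      }
    }
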
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
index 5622edb..ae90b58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
@@ -42,8 +42,8 @@
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.slf4j.event.Level;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 
 import java.io.DataOutputStream;
@@ -55,7 +55,7 @@
 import java.util.List;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests state transition from active->standby, and manual failover
@@ -308,15 +308,15 @@
       long nn0t0 = NameNodeAdapter.getLeaseRenewalTime(nn0, TEST_FILE_STR);
       assertTrue(nn0t0 > 0);
       long nn1t0 = NameNodeAdapter.getLeaseRenewalTime(nn1, TEST_FILE_STR);
-      assertEquals("Lease should not yet exist on nn1",
-          -1, nn1t0);
+      assertEquals(-1, nn1t0,
+          "Lease should not yet exist on nn1");
       
       Thread.sleep(5); // make sure time advances!
 
       HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
       long nn1t1 = NameNodeAdapter.getLeaseRenewalTime(nn1, TEST_FILE_STR);
-      assertTrue("Lease should have been created on standby. Time was: " +
-          nn1t1, nn1t1 > nn0t0);
+      assertTrue(nn1t1 > nn0t0,
+          "Lease should have been created on standby. Time was: " + nn1t1);
           
       Thread.sleep(5); // make sure time advances!
       
@@ -324,8 +324,8 @@
       cluster.transitionToStandby(0);
       cluster.transitionToActive(1);
       long nn1t2 = NameNodeAdapter.getLeaseRenewalTime(nn1, TEST_FILE_STR);
-      assertTrue("Lease should have been renewed by failover process",
-          nn1t2 > nn1t1);
+      assertTrue(nn1t2 > nn1t1,
+          "Lease should have been renewed by failover process");
     } finally {
       IOUtils.closeStream(stm);
       cluster.shutdown();
@@ -362,7 +362,7 @@
       nn2.getRpcServer().renewDelegationToken(token);
       nn2.getRpcServer().cancelDelegationToken(token);
       token = nn2.getRpcServer().getDelegationToken(new Text(renewer));
-      Assert.assertTrue(token != null);
+      Assertions.assertTrue(token != null);
     } finally {
       cluster.shutdown();
     }
@@ -433,8 +433,8 @@
     StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
     File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
         txid + 1);
-    assertTrue("Failed to create in-progress edits file",
-        inProgressFile.createNewFile());
+    assertTrue(inProgressFile.createNewFile(),
+        "Failed to create in-progress edits file");
     
     if (writeHeader) {
       DataOutputStream out = new DataOutputStream(new FileOutputStream(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHarFileSystemWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHarFileSystemWithHA.java
index 311f260..d162076 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHarFileSystemWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHarFileSystemWithHA.java
@@ -28,7 +28,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestHarFileSystemWithHA {
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
index 0705c19..71f9ae9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
@@ -17,11 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -44,9 +40,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestInitializeSharedEdits {
 
@@ -57,7 +53,7 @@
   private Configuration conf;
   private MiniDFSCluster cluster;
   
-  @Before
+  @BeforeEach
   public void setupCluster() throws IOException {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
@@ -75,7 +71,7 @@
     shutdownClusterAndRemoveSharedEditsDir();
   }
   
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
index 8cdd445..96ea2d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test makes sure that when
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java
index a0913e4..10665ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestMultiObserverNode.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -28,10 +28,10 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests multiple ObserverNodes.
@@ -44,7 +44,7 @@
 
   private final Path testPath= new Path("/TestMultiObserverNode");
 
-  @BeforeClass
+  @BeforeAll
   public static void startUpCluster() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
@@ -54,12 +54,12 @@
         dfsCluster, conf, ObserverReadProxyProvider.class, true);
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     dfs.delete(testPath, true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutDownCluster() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
@@ -155,7 +155,7 @@
   }
 
   private void assertSentTo(int... nnIndices) throws IOException {
-    assertTrue("Request was not sent to any of the expected namenodes.",
-        HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIndices));
+    assertTrue(HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIndices),
+        "Request was not sent to any of the expected namenodes.");
   }
 }
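
As in the TestMultiObserverNode hunks above, @BeforeClass and @AfterClass map to @BeforeAll and @AfterAll; like their JUnit 4 counterparts these must be static unless the class opts into a per-class test lifecycle. A minimal sketch (names are illustrative):

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    // Illustrative only; not part of this patch.
    class ClassLifecycleSketch {
      private static String sharedResource;

      @BeforeAll
      static void startUpOnceForTheClass() {
        sharedResource = "opened once, shared by every test method";
      }

      @Test
      void usesTheSharedResource() {
        assertNotNull(sharedResource);
      }

      @AfterAll
      static void shutDownOnceForTheClass() {
        sharedResource = null;
      }
    }
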
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
index ab7e0af..66dc03d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
@@ -21,8 +21,8 @@
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HA_HM_RPC_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -38,21 +38,21 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestNNHealthCheck {
 
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setup() {
     conf = new Configuration();
   }
 
-  @After
+  @AfterEach
   public void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -96,9 +96,9 @@
         DFSUtil.getNamenodeNameServiceId(conf), "nn1");
     final String expectedTargetString = haTarget.getAddress().toString();
 
-    assertTrue("Expected haTarget " + haTarget + " containing " +
-            expectedTargetString,
-        haTarget.toString().contains(expectedTargetString));
+    assertTrue(haTarget.toString().contains(expectedTargetString),
+        "Expected haTarget " + haTarget + " containing " +
+            expectedTargetString);
     HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, 5000);
 
     LambdaTestUtils.intercept(RemoteException.class,
@@ -121,9 +121,9 @@
     } else {
       expectedTargetString = haTarget.getAddress().toString();
     }
-    assertTrue("Expected haTarget " + haTarget + " containing " +
-        expectedTargetString,
-        haTarget.toString().contains(expectedTargetString));
+    assertTrue(haTarget.toString().contains(expectedTargetString),
+        "Expected haTarget " + haTarget + " containing " +
+            expectedTargetString);
     HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, conf.getInt(
         HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT));
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index 29cae6f..15990f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -20,9 +20,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getServiceState;
 import static org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -66,11 +64,7 @@
 import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,7 +83,7 @@
 
   private final Path testPath= new Path("/TestObserverNode");
 
-  @BeforeClass
+  @BeforeAll
   public static void startUpCluster() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
@@ -101,23 +95,23 @@
     dfsCluster = qjmhaCluster.getDfsCluster();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     setObserverRead(true);
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     dfs.delete(testPath, true);
-    assertEquals("NN[0] should be active", HAServiceState.ACTIVE,
-        getServiceState(dfsCluster.getNameNode(0)));
-    assertEquals("NN[1] should be standby", HAServiceState.STANDBY,
-        getServiceState(dfsCluster.getNameNode(1)));
-    assertEquals("NN[2] should be observer", HAServiceState.OBSERVER,
-        getServiceState(dfsCluster.getNameNode(2)));
+    assertEquals(HAServiceState.ACTIVE,
+        getServiceState(dfsCluster.getNameNode(0)), "NN[0] should be active");
+    assertEquals(HAServiceState.STANDBY,
+        getServiceState(dfsCluster.getNameNode(1)), "NN[1] should be standby");
+    assertEquals(HAServiceState.OBSERVER,
+        getServiceState(dfsCluster.getNameNode(2)), "NN[2] should be observer");
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutDownCluster() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
@@ -449,16 +443,16 @@
     dfsCluster.rollEditLogAndTail(0);
     // No Observers present, should still go to Active
     dfsCluster.transitionToStandby(2);
-    assertEquals("NN[2] should be standby", HAServiceState.STANDBY,
-        getServiceState(dfsCluster.getNameNode(2)));
+    assertEquals(HAServiceState.STANDBY,
+        getServiceState(dfsCluster.getNameNode(2)), "NN[2] should be standby");
     newFs.open(testFile).close();
     assertSentTo(0);
     // Restore Observer
     int newObserver = 1;
     dfsCluster.transitionToObserver(newObserver);
-    assertEquals("NN[" + newObserver + "] should be observer",
-        HAServiceState.OBSERVER,
-        getServiceState(dfsCluster.getNameNode(newObserver)));
+    assertEquals(HAServiceState.OBSERVER,
+        getServiceState(dfsCluster.getNameNode(newObserver)),
+        "NN[" + newObserver + "] should be observer");
     long startTime = Time.monotonicNow();
     try {
       while(Time.monotonicNow() - startTime <= 5000) {
@@ -547,19 +541,19 @@
         LOG.warn("MkDirRunner thread failed", e.getCause());
       }
     }
-    assertTrue("Not all threads finished", finished);
+    assertTrue(finished, "Not all threads finished");
     threadPool.shutdown();
 
-    assertEquals("Active and Observer stateIds don't match",
-        dfsCluster.getNameNode(0).getFSImage().getLastAppliedOrWrittenTxId(),
-        dfsCluster.getNameNode(2).getFSImage().getLastAppliedOrWrittenTxId());
+    assertEquals(dfsCluster.getNameNode(0).getFSImage().getLastAppliedOrWrittenTxId(),
+        dfsCluster.getNameNode(2).getFSImage().getLastAppliedOrWrittenTxId(),
+        "Active and Observer stateIds don't match");
     for (int i = 0; i < numThreads; i++) {
-      assertTrue("Client #" + i
-          + " lastSeenStateId=" + clientStates[i].lastSeenStateId
-          + " activStateId=" + activStateId
-          + "\n" + clientStates[i].fnfe,
-          clientStates[i].lastSeenStateId >= activStateId &&
-          clientStates[i].fnfe == null);
+      assertTrue(
+          clientStates[i].lastSeenStateId >= activStateId &&
+          clientStates[i].fnfe == null, "Client #" + i
+          + " lastSeenStateId=" + clientStates[i].lastSeenStateId
+          + " activStateId=" + activStateId
+          + "\n" + clientStates[i].fnfe);
     }
 
     // Restore edit log
@@ -593,7 +587,7 @@
 
         FileStatus stat = fs.getFileStatus(DIR_PATH);
         assertSentTo(fs, 2);
-        assertTrue("Should be a directory", stat.isDirectory());
+        assertTrue(stat.isDirectory(), "Should be a directory");
       } catch (FileNotFoundException ioe) {
         clientState.fnfe = ioe;
       } catch (Exception e) {
@@ -604,13 +598,13 @@
 
   private static void assertSentTo(DistributedFileSystem fs, int nnIdx)
       throws IOException {
-    assertTrue("Request was not sent to the expected namenode " + nnIdx,
-        HATestUtil.isSentToAnyOfNameNodes(fs, dfsCluster, nnIdx));
+    assertTrue(HATestUtil.isSentToAnyOfNameNodes(fs, dfsCluster, nnIdx),
+        "Request was not sent to the expected namenode " + nnIdx);
   }
 
   private void assertSentTo(int nnIdx) throws IOException {
-    assertTrue("Request was not sent to the expected namenode " + nnIdx,
-        HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx));
+    assertTrue(HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIdx),
+        "Request was not sent to the expected namenode " + nnIdx);
   }
 
   private static void setObserverRead(boolean flag) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
index 54b1159..5c2f5c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverReadProxyProvider.java
@@ -36,8 +36,8 @@
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -45,9 +45,7 @@
 import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 
 import static org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider.*;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -68,7 +66,7 @@
   private NameNodeAnswer[] namenodeAnswers;
   private String[] namenodeAddrs;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     ns = "testcluster";
     nnURI = URI.create("hdfs://" + ns);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index a09bfba..79ed1a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.io.OutputStream;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 260dd70..7b02d94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java
index cbd01b9..3c0fdae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestQuotasWithHA.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 
@@ -36,9 +36,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.StandbyException;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestQuotasWithHA {
   private static final Path TEST_DIR = new Path("/test");
@@ -54,7 +54,7 @@
   private NameNode nn1;
   private FileSystem fs;
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -76,7 +76,7 @@
     cluster.transitionToActive(0);
   }
   
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRemoteNameNodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRemoteNameNodeInfo.java
index cb2a4fc..ffddeec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRemoteNameNodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRemoteNameNodeInfo.java
@@ -21,11 +21,11 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.List;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test that we correctly obtain remote namenode information
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index e3e934b..d3ac756 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -89,9 +89,9 @@
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.util.LightWeightCache;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestRetryCacheWithHA {
   private static final Logger LOG =
@@ -137,7 +137,7 @@
     }
   }
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
@@ -154,7 +154,7 @@
     dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
   }
   
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -176,7 +176,7 @@
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
+    assertEquals(39, cacheSet.size(), "Retry cache size is wrong");
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -197,7 +197,7 @@
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
+    assertEquals(39, cacheSet.size(), "Retry cache size is wrong");
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -1319,11 +1319,11 @@
         }
       }
     }.start();
-    
-    // make sure the client's call has actually been handled by the active NN
-    assertTrue("After waiting the operation " + op.name
-        + " still has not taken effect on NN yet",
-        op.checkNamenodeBeforeReturn());
+
+    // make sure the client's call has actually been handled by the active NN
+    assertTrue(op.checkNamenodeBeforeReturn(),
+        "After waiting the operation " + op.name
+            + " still has not taken effect on NN yet");
     
     // force the failover
     cluster.transitionToStandby(0);
@@ -1351,8 +1351,8 @@
       return (hitsNN[0] + hitsNN[1]) > 0;
     }, 5, 10000);
 
-    assertTrue("CacheHit: " + hitsNN[0] + ", " + hitsNN[1],
-        +hitsNN[0] + hitsNN[1] > 0);
+    assertTrue(hitsNN[0] + hitsNN[1] > 0,
+        "CacheHit: " + hitsNN[0] + ", " + hitsNN[1]);
     final long[] updatesNN = new long[]{0, 0};
     GenericTestUtils.waitFor(() -> {
       updatesNN[0] = cluster.getNamesystem(0).getRetryCache()
@@ -1363,16 +1363,16 @@
           .getCacheUpdated();
       return updatesNN[0] > 0 && updatesNN[1] > 0;
     }, 5, 10000);
-    // Cache updated metrics on NN0 should be >0 since the op was process on NN0
-    assertTrue("CacheUpdated on NN0: " + updatesNN[0], updatesNN[0] > 0);
-    // Cache updated metrics on NN0 should be >0 since NN1 applied the editlog
-    assertTrue("CacheUpdated on NN1: " + updatesNN[1], updatesNN[1] > 0);
+    // Cache updated metrics on NN0 should be > 0 since the op was processed on NN0
+    assertTrue(updatesNN[0] > 0, "CacheUpdated on NN0: " + updatesNN[0]);
+    // Cache updated metrics on NN1 should be > 0 since NN1 applied the editlog
+    assertTrue(updatesNN[1] > 0, "CacheUpdated on NN1: " + updatesNN[1]);
     long expectedUpdateCount = op.getExpectedCacheUpdateCount();
     if (expectedUpdateCount > 0) {
-      assertEquals("CacheUpdated on NN0: " + updatesNN[0], expectedUpdateCount,
-          updatesNN[0]);
-      assertEquals("CacheUpdated on NN0: " + updatesNN[1], expectedUpdateCount,
-          updatesNN[1]);
+      assertEquals(expectedUpdateCount, updatesNN[0],
+          "CacheUpdated on NN0: " + updatesNN[0]);
+      assertEquals(expectedUpdateCount, updatesNN[1],
+          "CacheUpdated on NN1: " + updatesNN[1]);
     }
   }
 
@@ -1431,7 +1431,7 @@
     for (int i=0; i<poolCount; i++) {
       CachePoolEntry pool = pools.next();
       String pollName = pool.getInfo().getPoolName();
-      assertTrue("The pool name should be expected", tmpNames.remove(pollName));
+      assertTrue(tmpNames.remove(pollName), "The pool name should be expected");
       if (i % 2 == 0) {
         int standby = active;
         active = (standby == 0) ? 1 : 0;
@@ -1440,7 +1440,7 @@
         cluster.waitActive(active);
       }
     }
-    assertTrue("All pools must be found", tmpNames.isEmpty());
+    assertTrue(tmpNames.isEmpty(), "All pools must be found");
   }
 
   @SuppressWarnings("unchecked")
@@ -1452,7 +1452,7 @@
     for (int i=0; i<poolCount; i++) {
       CacheDirectiveEntry directive = directives.next();
       String pollName = directive.getInfo().getPool();
-      assertTrue("The pool name should be expected", tmpNames.remove(pollName));
+      assertTrue(tmpNames.remove(pollName), "The pool name should be expected");
       if (i % 2 == 0) {
         int standby = active;
         active = (standby == 0) ? 1 : 0;
@@ -1461,6 +1461,6 @@
         cluster.waitActive(active);
       }
     }
-    assertTrue("All pools must be found", tmpNames.isEmpty());
+    assertTrue(tmpNames.isEmpty(), "All pools must be found");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java
index 6eda1e3..6ff3298 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,7 +35,7 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test that we can start several and run with namenodes on the same minicluster
@@ -92,10 +92,10 @@
           }
         }
       }
-      assertEquals(
-          "Some writers didn't complete in expected runtime! Current writer state:"
-              + writers, 0,
-          writers.size());
+      assertEquals(0,
+          writers.size(),
+          "Some writers didn't complete in expected runtime! Current writer state:"
+              + writers);
 
       harness.stopThreads();
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
index 74c6f21..64a449b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
@@ -32,7 +32,7 @@
 import org.junit.Test;
 import org.slf4j.event.Level;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Makes sure that standby doesn't do the unnecessary block management such as
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 513f60c..14371b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -54,9 +54,9 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ThreadUtil;
 import org.apache.log4j.spi.LoggingEvent;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -65,7 +65,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 
 public class TestStandbyCheckpoints {
@@ -80,7 +80,7 @@
   private static final Logger LOG = LoggerFactory.getLogger(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     Configuration conf = setupCommonConfig();
 
@@ -143,7 +143,7 @@
     return conf;
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -175,9 +175,9 @@
         }
       }
     }, 1000, 60000);
-    
-    // It should have saved the oiv image too.
-    assertEquals("One file is expected", 1, tmpOivImgDir.list().length);
+
+    // It should have saved the oiv image too.
+    assertEquals(1, tmpOivImgDir.list().length, "One file is expected");
     
     // It should also upload it back to the active.
     HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
@@ -496,8 +496,8 @@
     doEdits(0, 1000);
     nns[0].getRpcServer().rollEditLog();
     answerer.waitForCall();
-    assertTrue("SBN is not performing checkpoint but it should be.",
-        answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
+    assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
+        "SBN is not performing checkpoint but it should be.");
     
     // Make sure that the lock has actually been taken by the checkpointing
     // thread.
@@ -516,15 +516,15 @@
     doCreate();
     Thread.sleep(1000);
     assertTrue(cluster.getNamesystem(1).getPendingDataNodeMessageCount() > 0);
-    
-    // Make sure that the checkpoint is still going on, implying that the client
-    // RPC to the SBN happened during the checkpoint.
-    assertTrue("SBN should have still been checkpointing.",
-        answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
+
+    // Make sure that the checkpoint is still going on, implying that the client
+    // RPC to the SBN happened during the checkpoint.
+    assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
+        "SBN should have still been checkpointing.");
     answerer.proceed();
     answerer.waitForResult();
-    assertTrue("SBN should have finished checkpointing.",
-        answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
+    assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 1,
+        "SBN should have finished checkpointing.");
   }
   
   @Test(timeout=300000)
@@ -542,8 +542,8 @@
     doEdits(0, 1000);
     nns[0].getRpcServer().rollEditLog();
     answerer.waitForCall();
-    assertTrue("SBN is not performing checkpoint but it should be.",
-        answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
+    assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
+        "SBN is not performing checkpoint but it should be.");
     
     // Make sure that the lock has actually been taken by the checkpointing
     // thread.
@@ -575,15 +575,15 @@
         nns[1].getHttpAddress().getHostName() + ":" +
         nns[1].getHttpAddress().getPort() + "/jmx"));
     assertTrue(pageContents.contains("NumLiveDataNodes"));
-    
-    // Make sure that the checkpoint is still going on, implying that the client
-    // RPC to the SBN happened during the checkpoint.
-    assertTrue("SBN should have still been checkpointing.",
-        answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
+
+    // Make sure that the checkpoint is still going on, implying that the client
+    // RPC to the SBN happened during the checkpoint.
+    assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
+        "SBN should have still been checkpointing.");
     answerer.proceed();
     answerer.waitForResult();
-    assertTrue("SBN should have finished checkpointing.",
-        answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
+    assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 1,
+        "SBN should have finished checkpointing.");
     
     t.join();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
index afe564d..3b5f5a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.FilenameFilter;
@@ -45,9 +43,9 @@
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo;
 import static org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager.QJM_RPC_MAX_TXNS_KEY;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,7 +64,7 @@
   private NameNode nn0;
   private NameNode nn1;
 
-  @Before
+  @BeforeEach
   public void startUp() throws IOException {
     conf = new Configuration();
     // Set period of tail edits to a large value (20 mins) for test purposes
@@ -85,7 +83,7 @@
     nn1 = cluster.getNameNode(1);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
index b45e226..3edcced 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 
@@ -39,8 +39,8 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.event.Level;
 
 import java.util.function.Supplier;
@@ -175,8 +175,8 @@
       
       LocatedBlocks locs = nn1.getRpcServer().getBlockLocations(
           TEST_FILE, 0, 1);
-      assertEquals("Standby should have registered that the block has no replicas",
-          0, locs.get(0).getLocations().length);
+      assertEquals(0, locs.get(0).getLocations().length,
+          "Standby should have registered that the block has no replicas");
       
       cluster.restartDataNode(dnProps);
       // Wait for both NNs to re-register the DN.
@@ -192,8 +192,8 @@
       
       locs = nn1.getRpcServer().getBlockLocations(
           TEST_FILE, 0, 1);
-      assertEquals("Standby should have registered that the block has replicas again",
-          1, locs.get(0).getLocations().length);
+      assertEquals(1, locs.get(0).getLocations().length,
+          "Standby should have registered that the block has replicas again");
     } finally {
       cluster.shutdown();
     }
@@ -211,7 +211,7 @@
           LocatedBlocks locs = NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
           DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
           for (DatanodeInfo dni : dnis) {
-            Assert.assertNotNull(dni);
+            Assertions.assertNotNull(dni);
           }
           int numReplicas = dnis.length;
           
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
index d3e5cf2..7939c54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -27,7 +27,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests to verify the behavior of failing to fully start transition HA states.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java
index 1462314..7c359b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestUpdateBlockTailing.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -46,10 +46,10 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 /**
@@ -67,7 +67,7 @@
   private static FSNamesystem fsn1;
   private static DataNode dn0;
 
-  @BeforeClass
+  @BeforeAll
   public static void startUpCluster() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean(DFS_HA_TAILEDITS_INPROGRESS_KEY, true);
@@ -85,14 +85,14 @@
     dn0 = dfsCluster.getDataNodes().get(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutDownCluster() throws IOException {
     if (qjmhaCluster != null) {
       qjmhaCluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void reset() throws Exception {
     dfsCluster.transitionToStandby(1);
     dfsCluster.transitionToActive(0);
@@ -102,10 +102,10 @@
   public void testStandbyAddBlockIBRRace() throws Exception {
     String testFile = TEST_DIR +"/testStandbyAddBlockIBRRace";
 
-    // initial global generation stamp check
-    assertEquals("Global Generation stamps on NNs should be the same",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    // initial global generation stamp check
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NNs should be the same");
 
     // create a file, add a block on NN0
     // do not journal addBlock yet
@@ -123,10 +123,10 @@
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
 
-    assertEquals("Global Generation stamps on NN0 and "
-            + "impending on NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getImpendingGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getImpendingGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "impending on NN1 should be equal");
 
     // NN1 processes IBR with the replica
     StorageReceivedDeletedBlocks[] report = DFSTestUtil
@@ -143,20 +143,20 @@
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
 
-    assertEquals("Global Generation stamps on NN0 and "
-            + "impending on NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getImpendingGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getImpendingGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "impending on NN1 should be equal");
 
     // The new block on NN1 should have the replica
     BlockInfo newBlock1 = NameNodeAdapter.getStoredBlock(fsn1, newBlock);
-    assertTrue("New block on NN1 should contain the replica",
-        newBlock1.getStorageInfos().hasNext());
-    assertEquals("Generation stamps of the block on NNs should be the same",
-        newBlock.getGenerationStamp(), newBlock1.getGenerationStamp());
-    assertEquals("Global Generation stamps on NNs should be the same",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertTrue(newBlock1.getStorageInfos().hasNext(),
+        "New block on NN1 should contain the replica");
+    assertEquals(newBlock.getGenerationStamp(), newBlock1.getGenerationStamp(),
+        "Generation stamps of the block on NNs should be the same");
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NNs should be the same");
 
     // Check that the generation stamp restores on Standby after failover
     ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer();
@@ -166,9 +166,9 @@
     long gs0 = NameNodeAdapter.getGenerationStamp(fsn0);
     dfsCluster.transitionToStandby(0);
     dfsCluster.transitionToActive(1);
-    assertEquals("Global Generation stamps on new active should be "
-            + "the same as on the old one", gs0,
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(gs0, NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on new active should be "
+        + "the same as on the old one");
 
     rpc1.delete(testFile, false);
   }
@@ -182,10 +182,10 @@
     // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
-    assertEquals("Global Generation stamps on NN0 and "
-            + "NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "NN1 should be equal");
 
     // Append block without newBlock flag
     try (FSDataOutputStream out = dfs.append(new Path(testFile))) {
@@ -197,10 +197,10 @@
     // NN1 tails OP_APPEND, OP_SET_GENSTAMP_V2, and OP_UPDATE_BLOCKS
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
-    assertEquals("Global Generation stamps on NN0 and "
-            + "NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "NN1 should be equal");
 
     // Remove the testFile
     final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer();
@@ -216,10 +216,10 @@
     // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
-    assertEquals("Global Generation stamps on NN0 and "
-            + "NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "NN1 should be equal");
 
     // Append block with newBlock flag
     try (FSDataOutputStream out = dfs.append(new Path(testFile),
@@ -232,10 +232,10 @@
     // NN1 tails OP_APPEND, OP_SET_GENSTAMP_V2, and OP_ADD_BLOCK
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
-    assertEquals("Global Generation stamps on NN0 and "
-            + "NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "NN1 should be equal");
 
     // Remove the testFile
     final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer();
@@ -251,10 +251,10 @@
     // NN1 tails OP_SET_GENSTAMP_V2 and OP_ADD_BLOCK
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
-    assertEquals("Global Generation stamps on NN0 and "
-            + "NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "NN1 should be equal");
 
     // Truncate block
     dfs.truncate(new Path(testFile), fileLen/2);
@@ -262,10 +262,10 @@
     // NN1 tails OP_SET_GENSTAMP_V2 and OP_TRUNCATE
     fsn0.getEditLog().logSync();
     fsn1.getEditLogTailer().doTailEdits();
-    assertEquals("Global Generation stamps on NN0 and "
-            + "NN1 should be equal",
-        NameNodeAdapter.getGenerationStamp(fsn0),
-        NameNodeAdapter.getGenerationStamp(fsn1));
+    assertEquals(NameNodeAdapter.getGenerationStamp(fsn0),
+        NameNodeAdapter.getGenerationStamp(fsn1),
+        "Global Generation stamps on NN0 and "
+        + "NN1 should be equal");
 
     // Remove the testFile
     final ClientProtocol rpc0 = dfsCluster.getNameNode(0).getRpcServer();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
index bb44d05..d263f66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestXAttrsWithHA.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -34,10 +34,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Tests interaction of XAttrs with HA failover.
@@ -58,7 +58,7 @@
   private NameNode nn1;
   private FileSystem fs;
 
-  @Before
+  @BeforeEach
   public void setupCluster() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
@@ -78,7 +78,7 @@
     cluster.transitionToActive(0);
   }
   
-  @After
+  @AfterEach
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -105,9 +105,9 @@
     cluster.transitionToActive(1);
     
     Map<String, byte[]> xattrs = fs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
     
     fs.delete(path, true);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
index c11f880..db078da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
@@ -31,9 +31,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test case for FilesInGetListingOps metric in Namenode
@@ -52,7 +52,7 @@
   private DistributedFileSystem fs;
   private final Random rand = new Random();
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(CONF).build();
     cluster.waitActive();
@@ -60,7 +60,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 349b7ac..2a38708 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -37,8 +37,8 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.File;
@@ -90,9 +90,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test for metrics published by the Namenode
@@ -158,7 +158,7 @@
     return new Path(TEST_ROOT_DIR_PATH, fileName);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     hostsFileWriter = new HostsFileWriter();
     hostsFileWriter.initialize(CONF, "temp/decommission");
@@ -174,7 +174,7 @@
     fs.setErasureCodingPolicy(ecDir, EC_POLICY.getName());
   }
   
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     MetricsSource source = DefaultMetricsSystem.instance().getSource("UgiMetrics");
     if (source != null) {
@@ -439,32 +439,32 @@
    */
   private void verifyAggregatedMetricsTally() throws Exception {
     BlockManagerTestUtil.updateState(bm);
-    assertEquals("Under replicated metrics not matching!",
-        namesystem.getLowRedundancyBlocks(),
-        namesystem.getUnderReplicatedBlocks());
-    assertEquals("Low redundancy metrics not matching!",
-        namesystem.getLowRedundancyBlocks(),
-        namesystem.getLowRedundancyReplicatedBlocks() +
-            namesystem.getLowRedundancyECBlockGroups());
-    assertEquals("Corrupt blocks metrics not matching!",
-        namesystem.getCorruptReplicaBlocks(),
-        namesystem.getCorruptReplicatedBlocks() +
-            namesystem.getCorruptECBlockGroups());
-    assertEquals("Missing blocks metrics not matching!",
-        namesystem.getMissingBlocksCount(),
-        namesystem.getMissingReplicatedBlocks() +
-            namesystem.getMissingECBlockGroups());
-    assertEquals("Missing blocks with replication factor one not matching!",
-        namesystem.getMissingReplOneBlocksCount(),
-        namesystem.getMissingReplicationOneBlocks());
-    assertEquals("Bytes in future blocks metrics not matching!",
-        namesystem.getBytesInFuture(),
-        namesystem.getBytesInFutureReplicatedBlocks() +
-            namesystem.getBytesInFutureECBlockGroups());
-    assertEquals("Pending deletion blocks metrics not matching!",
-        namesystem.getPendingDeletionBlocks(),
-        namesystem.getPendingDeletionReplicatedBlocks() +
-            namesystem.getPendingDeletionECBlocks());
+    assertEquals(namesystem.getLowRedundancyBlocks(),
+        namesystem.getUnderReplicatedBlocks(),
+        "Under replicated metrics not matching!");
+    assertEquals(namesystem.getLowRedundancyBlocks(),
+        namesystem.getLowRedundancyReplicatedBlocks() +
+            namesystem.getLowRedundancyECBlockGroups(),
+        "Low redundancy metrics not matching!");
+    assertEquals(namesystem.getCorruptReplicaBlocks(),
+        namesystem.getCorruptReplicatedBlocks() +
+            namesystem.getCorruptECBlockGroups(),
+        "Corrupt blocks metrics not matching!");
+    assertEquals(namesystem.getMissingBlocksCount(),
+        namesystem.getMissingReplicatedBlocks() +
+            namesystem.getMissingECBlockGroups(),
+        "Missing blocks metrics not matching!");
+    assertEquals(namesystem.getMissingReplOneBlocksCount(),
+        namesystem.getMissingReplicationOneBlocks(),
+        "Missing blocks with replication factor one not matching!");
+    assertEquals(namesystem.getBytesInFuture(),
+        namesystem.getBytesInFutureReplicatedBlocks() +
+            namesystem.getBytesInFutureECBlockGroups(),
+        "Bytes in future blocks metrics not matching!");
+    assertEquals(namesystem.getPendingDeletionBlocks(),
+        namesystem.getPendingDeletionReplicatedBlocks() +
+            namesystem.getPendingDeletionECBlocks(),
+        "Pending deletion blocks metrics not matching!");
   }
 
   /** Corrupt a block and ensure metrics reflects it */
@@ -830,12 +830,12 @@
         fs2.mkdirs(new Path("/tmp-t1"));
         fs2.mkdirs(new Path("/tmp-t2"));
         HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
-        // Test to ensure tracking works before the first-ever
-        // checkpoint.
-        assertEquals("SBN failed to track 2 transactions pre-checkpoint.",
-            4L, // 2 txns added further when catch-up is called.
-            cluster2.getNameNode(1).getNamesystem()
-              .getTransactionsSinceLastCheckpoint());
+        // Test to ensure tracking works before the first-ever
+        // checkpoint.
+        assertEquals(4L, // 2 txns added further when catch-up is called.
+            cluster2.getNameNode(1).getNamesystem()
+                .getTransactionsSinceLastCheckpoint(),
+            "SBN failed to track 2 transactions pre-checkpoint.");
         // Complete up to the boundary required for
         // an auto-checkpoint. Using 94 to expect fsimage
         // rounded at 100, as 4 + 94 + 2 (catch-up call) = 100.
@@ -845,22 +845,22 @@
         HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
         // Assert 100 transactions in checkpoint.
         HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
-        // Test to ensure number tracks the right state of
-        // uncheckpointed edits, and does not go negative
-        // (as fixed in HDFS-7501).
-        assertEquals("Should be zero right after the checkpoint.",
-            0L,
-            cluster2.getNameNode(1).getNamesystem()
-              .getTransactionsSinceLastCheckpoint());
+        // Test to ensure number tracks the right state of
+        // uncheckpointed edits, and does not go negative
+        // (as fixed in HDFS-7501).
+        assertEquals(0L,
+            cluster2.getNameNode(1).getNamesystem()
+                .getTransactionsSinceLastCheckpoint(),
+            "Should be zero right after the checkpoint.");
         fs2.mkdirs(new Path("/tmp-t3"));
         fs2.mkdirs(new Path("/tmp-t4"));
         HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
-        // Test to ensure we track the right numbers after
-        // the checkpoint resets it to zero again.
-        assertEquals("SBN failed to track 2 added txns after the ckpt.",
-            4L,
-            cluster2.getNameNode(1).getNamesystem()
-              .getTransactionsSinceLastCheckpoint());
+        // Test to ensure we track the right numbers after
+        // the checkpoint resets it to zero again.
+        assertEquals(4L,
+            cluster2.getNameNode(1).getNamesystem()
+                .getTransactionsSinceLastCheckpoint(),
+            "SBN failed to track 2 added txns after the ckpt.");
         cluster2.shutdown();
         break;
       } catch (Exception e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
index 4d3a4f0..cfe42f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestTopMetrics.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.lib.Interns;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics.TOPMETRICS_METRICS_SOURCE_NAME;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index d57a734..3ff015f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -42,15 +42,15 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.GSet;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.*;
 import java.util.*;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Helper for writing snapshot related tests
@@ -153,9 +153,9 @@
     // Compare the snapshot with the current dir
     FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
     FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
-    assertEquals("snapshottedDir=" + snapshottedDir
-        + ", snapshotRoot=" + snapshotRoot,
-        currentFiles.length, snapshotFiles.length);
+    assertEquals(currentFiles.length, snapshotFiles.length,
+        "snapshottedDir=" + snapshottedDir
+        + ", snapshotRoot=" + snapshotRoot);
   }
   
   /**
@@ -247,8 +247,8 @@
         }
         assertEquals(line1.trim(), line2.trim());
       }
-      Assert.assertNull(reader1.readLine());
-      Assert.assertNull(reader2.readLine());
+      Assertions.assertNull(reader1.readLine());
+      Assertions.assertNull(reader2.readLine());
     } finally {
       reader1.close();
       reader2.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
index 9f911fa..dbb2d4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
@@ -21,7 +21,7 @@
 import static org.apache.hadoop.fs.permission.AclEntryScope.*;
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -50,11 +50,11 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 /**
@@ -77,14 +77,14 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     initCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     IOUtils.cleanupWithLogger(null, hdfs, fsAsBruce, fsAsDiana);
     if (cluster != null) {
@@ -92,7 +92,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() {
     ++pathCount;
     path = new Path("/p" + pathCount);
@@ -666,8 +666,8 @@
         aclEntry(ACCESS, GROUP, "testdeduplicategroup", ALL));
     hdfs.mkdirs(path);
     hdfs.modifyAclEntries(path, aclSpec);
-    assertEquals("One more ACL feature should be unique", startSize + 1,
-        AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
+    assertEquals(startSize + 1, AclStorage.getUniqueAclFeatures()
+        .getUniqueElementsSize(), "One more ACL feature should be unique");
     Path subdir = new Path(path, "sub-dir");
     hdfs.mkdirs(subdir);
     Path file = new Path(path, "file");
@@ -677,15 +677,15 @@
       // create the snapshot with root directory having ACLs should refer to
       // same ACLFeature without incrementing the reference count
       aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
-      assertEquals("Reference count should be one before snapshot", 1,
-          aclFeature.getRefCount());
+      assertEquals(1, aclFeature.getRefCount(),
+          "Reference count should be one before snapshot");
       Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
           snapshotName);
       AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(snapshotPath,
           cluster);
       assertSame(aclFeature, snapshotAclFeature);
-      assertEquals("Reference count should be increased", 2,
-          snapshotAclFeature.getRefCount());
+      assertEquals(2, snapshotAclFeature.getRefCount(),
+          "Reference count should be increased");
     }
     {
       // deleting the snapshot with root directory having ACLs should not alter
@@ -695,15 +695,15 @@
     {
       hdfs.modifyAclEntries(subdir, aclSpec);
       aclFeature = FSAclBaseTest.getAclFeature(subdir, cluster);
-      assertEquals("Reference count should be 1", 1, aclFeature.getRefCount());
+      assertEquals(1, aclFeature.getRefCount(), "Reference count should be 1");
       Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
           snapshotName);
       Path subdirInSnapshot = new Path(snapshotPath, "sub-dir");
       AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(subdirInSnapshot,
           cluster);
       assertSame(aclFeature, snapshotAcl);
-      assertEquals("Reference count should remain same", 1,
-          aclFeature.getRefCount());
+      assertEquals(1, aclFeature.getRefCount(),
+          "Reference count should remain same");
 
       // Delete the snapshot with sub-directory containing the ACLs should not
       // alter the reference count for AclFeature
@@ -712,15 +712,15 @@
     {
       hdfs.modifyAclEntries(file, aclSpec);
       aclFeature = FSAclBaseTest.getAclFeature(file, cluster);
-      assertEquals("Reference count should be 1", 1, aclFeature.getRefCount());
+      assertEquals(1, aclFeature.getRefCount(), "Reference count should be 1");
       Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
           snapshotName);
       Path fileInSnapshot = new Path(snapshotPath, file.getName());
       AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(fileInSnapshot,
           cluster);
       assertSame(aclFeature, snapshotAcl);
-      assertEquals("Reference count should remain same", 1,
-          aclFeature.getRefCount());
+      assertEquals(1, aclFeature.getRefCount(),
+          "Reference count should remain same");
 
       // Delete the snapshot with contained file having ACLs should not
       // alter the reference count for AclFeature
@@ -735,8 +735,8 @@
       AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(snapshotPath,
           cluster);
       aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
-      assertEquals("Before modification same ACL should be referenced twice", 2,
-          aclFeature.getRefCount());
+      assertEquals(2, aclFeature.getRefCount(),
+          "Before modification same ACL should be referenced twice");
       List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER,
           "testNewUser", ALL));
       hdfs.modifyAclEntries(path, newAcl);
@@ -745,10 +745,10 @@
           snapshotPath, cluster);
       assertSame(snapshotAcl, snapshotAclPostModification);
       assertNotSame(aclFeature, snapshotAclPostModification);
-      assertEquals("Old ACL feature reference count should be same", 1,
-          snapshotAcl.getRefCount());
-      assertEquals("New ACL feature reference should be used", 1,
-          aclFeature.getRefCount());
+      assertEquals(1, snapshotAcl.getRefCount(),
+          "Old ACL feature reference count should be same");
+      assertEquals(1, aclFeature.getRefCount(),
+          "New ACL feature reference should be used");
       deleteSnapshotWithAclAndVerify(aclFeature, path, startSize);
     }
     {
@@ -765,9 +765,9 @@
       hdfs.modifyAclEntries(subdir, newAcl);
       aclFeature = FSAclBaseTest.getAclFeature(subdir, cluster);
       assertNotSame(aclFeature, snapshotAclFeature);
-      assertEquals("Reference count should remain same", 1,
-          snapshotAclFeature.getRefCount());
-      assertEquals("New AclFeature should be used", 1, aclFeature.getRefCount());
+      assertEquals(1, snapshotAclFeature.getRefCount(),
+          "Reference count should remain same");
+      assertEquals(1, aclFeature.getRefCount(), "New AclFeature should be used");
 
       deleteSnapshotWithAclAndVerify(aclFeature, subdir, startSize);
     }
@@ -785,8 +785,8 @@
       hdfs.modifyAclEntries(file, newAcl);
       aclFeature = FSAclBaseTest.getAclFeature(file, cluster);
       assertNotSame(aclFeature, snapshotAclFeature);
-      assertEquals("Reference count should remain same", 1,
-          snapshotAclFeature.getRefCount());
+      assertEquals(1, snapshotAclFeature.getRefCount(),
+          "Reference count should remain same");
       deleteSnapshotWithAclAndVerify(aclFeature, file, startSize);
     }
     {
@@ -813,15 +813,15 @@
       assertSame(fileAcl, snapshotFileAclFeature);
       assertSame(dirAcl, snapshotDirAclFeature);
       hdfs.delete(subdir, true);
-      assertEquals(
-          "Original ACLs references should be maintained for snapshot", 1,
-          snapshotFileAclFeature.getRefCount());
-      assertEquals(
-          "Original ACLs references should be maintained for snapshot", 1,
-          snapshotDirAclFeature.getRefCount());
+      assertEquals(1,
+          snapshotFileAclFeature.getRefCount(),
+          "Original ACLs references should be maintained for snapshot");
+      assertEquals(1,
+          snapshotDirAclFeature.getRefCount(),
+          "Original ACLs references should be maintained for snapshot");
       hdfs.deleteSnapshot(path, snapshotName);
-      assertEquals("ACLs should be deleted from snapshot", startSize, AclStorage
-          .getUniqueAclFeatures().getUniqueElementsSize());
+      assertEquals(startSize, AclStorage.getUniqueAclFeatures()
+          .getUniqueElementsSize(), "ACLs should be deleted from snapshot");
     }
   }
 
@@ -831,14 +831,14 @@
     AclFeature afterDeleteAclFeature = FSAclBaseTest.getAclFeature(
         pathToCheckAcl, cluster);
     assertSame(aclFeature, afterDeleteAclFeature);
-    assertEquals("Reference count should remain same"
-        + " even after deletion of snapshot", 1,
-        afterDeleteAclFeature.getRefCount());
+    assertEquals(1, afterDeleteAclFeature.getRefCount(),
+        "Reference count should remain same"
+        + " even after deletion of snapshot");
 
     hdfs.removeAcl(pathToCheckAcl);
-    assertEquals("Reference count should be 0", 0, aclFeature.getRefCount());
-    assertEquals("Unique ACL features should remain same", totalAclFeatures,
-        AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
+    assertEquals(0, aclFeature.getRefCount(), "Reference count should be 0");
+    assertEquals(totalAclFeatures, AclStorage.getUniqueAclFeatures()
+        .getUniqueElementsSize(), "Unique ACL features should remain same");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
index 3b83452..9d74e94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.File;
 import java.io.IOException;
@@ -32,8 +32,8 @@
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestCheckpointsWithSnapshots {
   
@@ -43,7 +43,7 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
   }
   
-  @Before
+  @BeforeEach
   public void setUp() {
     FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDiffListBySkipList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDiffListBySkipList.java
index 43f9bce..b3902f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDiffListBySkipList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDiffListBySkipList.java
@@ -29,10 +29,10 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffListBySkipList.SkipListNode;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.util.Collections;
 import java.util.List;
@@ -57,7 +57,7 @@
   private static FSDirectory fsdir;
   private static DistributedFileSystem hdfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster =
         new MiniDFSCluster.Builder(CONF).numDataNodes(0).format(true).build();
@@ -67,7 +67,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -81,9 +81,9 @@
   }
 
   static void assertList(List<INode> expected, List<INode> computed) {
-    Assert.assertEquals(expected.size(), computed.size());
+    Assertions.assertEquals(expected.size(), computed.size());
     for (int index = 0; index < expected.size(); index++) {
-      Assert.assertEquals(expected.get(index), computed.get(index));
+      Assertions.assertEquals(expected.get(index), computed.get(index));
     }
   }
 
@@ -107,7 +107,7 @@
       DiffList<DirectoryDiff> array, DiffListBySkipList skip,
       INodeDirectory dir, List<INode> childrenList) {
     final int n = array.size();
-    Assert.assertEquals(n, skip.size());
+    Assertions.assertEquals(n, skip.size());
     for (int i = 0; i < n - 1; i++) {
       for (int j = i + 1; j < n - 1; j++) {
         final List<INode> expected = getCombined(array, i, j, dir)
@@ -254,8 +254,8 @@
     final DiffListBySkipList skipList = newDiffListBySkipList();
     final DiffList<DirectoryDiff> arrayList = new DiffListByArrayList<>(0);
     final INodeDirectory dir = addDiff(n, skipList, arrayList, root);
-    Assert.assertEquals(n, arrayList.size());
-    Assert.assertEquals(n, skipList.size());
+    Assertions.assertEquals(n, arrayList.size());
+    Assertions.assertEquals(n, skipList.size());
 
     for (int i = 0; i < n; i++) {
       DiffListBySkipList.LOG.debug("i={}: {}", i, skipList);
@@ -326,7 +326,7 @@
 
   static void assertDirectoryDiff(DirectoryDiff expected,
       DirectoryDiff computed) {
-    Assert.assertEquals(expected.getSnapshotId(), computed.getSnapshotId());
+    Assertions.assertEquals(expected.getSnapshotId(), computed.getSnapshotId());
   }
 
   static void assertSkipList(DiffListBySkipList skipList) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
index 5a74de35..89a70a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
@@ -17,8 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
 import java.util.ArrayList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options;
@@ -29,9 +30,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * This class tests snapshot functionality. One or multiple snapshots are
@@ -55,7 +56,7 @@
   protected static ArrayList<Path> snapshotList = new ArrayList<Path>();
   static Path objInSnapshot = null;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -73,7 +74,7 @@
         "dir1");
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
index dec28e4..61ee4e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFSImageWithOrderedSnapshotDeletion.java
@@ -25,10 +25,10 @@
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.visitor.NamespacePrintVisitor;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.event.Level;
 
 import java.io.File;
@@ -36,7 +36,7 @@
 import java.io.PrintWriter;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test FSImage correctness with ordered snapshot deletion.
@@ -60,7 +60,7 @@
   FSNamesystem fsn;
   DistributedFileSystem hdfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
@@ -71,7 +71,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -189,7 +189,7 @@
     output.println(b);
 
     final String s = NamespacePrintVisitor.print2Sting(fsn);
-    Assert.assertEquals(b, s);
+    Assertions.assertEquals(b, s);
     return b;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java
index a6cd1dd..335346f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
@@ -31,9 +29,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestFileContextSnapshot {
 
@@ -49,7 +47,7 @@
   private final Path filePath = new Path(snapshotRoot, "file1");
   private Path snapRootPath;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -63,7 +61,7 @@
     dfs.mkdirs(snapRootPath);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -86,9 +84,9 @@
     // allow snapshot on dir
     dfs.allowSnapshot(snapRootPath);
     Path ssPath = fileContext.createSnapshot(snapRootPath, "s1");
-    assertTrue("Failed to create snapshot", dfs.exists(ssPath));
+    assertTrue(dfs.exists(ssPath), "Failed to create snapshot");
     fileContext.deleteSnapshot(snapRootPath, "s1");
-    assertFalse("Failed to delete snapshot", dfs.exists(ssPath));
+    assertFalse(dfs.exists(ssPath), "Failed to delete snapshot");
   }
 
   /**
@@ -101,25 +99,25 @@
     // Create snapshot for sub1
     Path snapPath1 = fileContext.createSnapshot(snapRootPath, "s1");
     Path ssPath = new Path(snapPath1, filePath.getName());
-    assertTrue("Failed to create snapshot", dfs.exists(ssPath));
+    assertTrue(dfs.exists(ssPath), "Failed to create snapshot");
     FileStatus statusBeforeRename = dfs.getFileStatus(ssPath);
 
     // Rename the snapshot
     fileContext.renameSnapshot(snapRootPath, "s1", "s2");
-    // <sub1>/.snapshot/s1/file1 should no longer exist
-    assertFalse("Old snapshot still exists after rename!", dfs.exists(ssPath));
+    // <sub1>/.snapshot/s1/file1 should no longer exist
+    assertFalse(dfs.exists(ssPath), "Old snapshot still exists after rename!");
     Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapRootPath, "s2");
     ssPath = new Path(snapshotRoot, filePath.getName());
 
-    // Instead, <sub1>/.snapshot/s2/file1 should exist
-    assertTrue("Snapshot doesn't exists!", dfs.exists(ssPath));
+    // Instead, <sub1>/.snapshot/s2/file1 should exist
+    assertTrue(dfs.exists(ssPath), "Snapshot doesn't exist!");
     FileStatus statusAfterRename = dfs.getFileStatus(ssPath);
 
-    // FileStatus of the snapshot should not change except the path
-    assertFalse("Filestatus of the snapshot matches",
-        statusBeforeRename.equals(statusAfterRename));
+    // FileStatus of the snapshot should not change except the path
+    assertFalse(statusBeforeRename.equals(statusAfterRename),
+        "Filestatus of the snapshot matches");
     statusBeforeRename.setPath(statusAfterRename.getPath());
-    assertEquals("FileStatus of the snapshot mismatches!",
-        statusBeforeRename.toString(), statusAfterRename.toString());
+    assertEquals(statusBeforeRename.toString(), statusAfterRename.toString(),
+        "FileStatus of the snapshot mismatches!");
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
index e864b91..81687fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -31,13 +31,13 @@
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.Lists;
 
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import static org.apache.hadoop.fs.StorageType.DISK;
 import static org.apache.hadoop.fs.StorageType.SSD;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.anyByte;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -75,8 +75,8 @@
         bsps, collectedBlocks, removedINodes, null);
     sf.updateQuotaAndCollectBlocks(ctx, file, diff);
     QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
-    Assert.assertEquals(0, counts.getStorageSpace());
-    Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
+    Assertions.assertEquals(0, counts.getStorageSpace());
+    Assertions.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
 
     // INode only exists in the snapshot
     INodeFile snapshotINode = mock(INodeFile.class);
@@ -91,10 +91,10 @@
     blocks[0].setReplication(REPL_3);
     sf.updateQuotaAndCollectBlocks(ctx, file, diff);
     counts = ctx.quotaDelta().getCountsCopy();
-    Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
+    Assertions.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
                         counts.getStorageSpace());
-    Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
-    Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
+    Assertions.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
+    Assertions.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
index 9aadeb2..7b586cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestGetContentSummaryWithSnapshot.java
@@ -26,12 +26,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -51,10 +49,7 @@
   protected FSDirectory fsdir;
   protected DistributedFileSystem dfs;
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -66,7 +61,7 @@
     dfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -103,68 +98,68 @@
 
     ContentSummary summary = cluster.getNameNodeRpc().getContentSummary(
         bar.toString());
-    Assert.assertEquals(1, summary.getDirectoryCount());
-    Assert.assertEquals(2, summary.getFileCount());
-    Assert.assertEquals(20, summary.getLength());
+    Assertions.assertEquals(1, summary.getDirectoryCount());
+    Assertions.assertEquals(2, summary.getFileCount());
+    Assertions.assertEquals(20, summary.getLength());
 
     final Path barS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar");
     summary = cluster.getNameNodeRpc().getContentSummary(barS1.toString());
-    Assert.assertEquals(1, summary.getDirectoryCount());
-    Assert.assertEquals(0, summary.getFileCount());
-    Assert.assertEquals(0, summary.getLength());
+    Assertions.assertEquals(1, summary.getDirectoryCount());
+    Assertions.assertEquals(0, summary.getFileCount());
+    Assertions.assertEquals(0, summary.getLength());
 
     // also check /foo and /foo/.snapshot/s1
     summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
-    Assert.assertEquals(2, summary.getDirectoryCount());
-    Assert.assertEquals(2, summary.getFileCount());
-    Assert.assertEquals(20, summary.getLength());
+    Assertions.assertEquals(2, summary.getDirectoryCount());
+    Assertions.assertEquals(2, summary.getFileCount());
+    Assertions.assertEquals(20, summary.getLength());
 
     final Path fooS1 = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
     summary = cluster.getNameNodeRpc().getContentSummary(fooS1.toString());
-    Assert.assertEquals(2, summary.getDirectoryCount());
-    Assert.assertEquals(0, summary.getFileCount());
-    Assert.assertEquals(0, summary.getLength());
+    Assertions.assertEquals(2, summary.getDirectoryCount());
+    Assertions.assertEquals(0, summary.getFileCount());
+    Assertions.assertEquals(0, summary.getLength());
 
     // create a new snapshot s2 and update the file
     dfs.createSnapshot(foo, "s2");
     DFSTestUtil.appendFile(dfs, baz, 10);
     summary = cluster.getNameNodeRpc().getContentSummary(
         bar.toString());
-    Assert.assertEquals(1, summary.getDirectoryCount());
-    Assert.assertEquals(2, summary.getFileCount());
-    Assert.assertEquals(30, summary.getLength());
+    Assertions.assertEquals(1, summary.getDirectoryCount());
+    Assertions.assertEquals(2, summary.getFileCount());
+    Assertions.assertEquals(30, summary.getLength());
 
     final Path fooS2 = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
     summary = cluster.getNameNodeRpc().getContentSummary(fooS2.toString());
-    Assert.assertEquals(2, summary.getDirectoryCount());
-    Assert.assertEquals(2, summary.getFileCount());
-    Assert.assertEquals(20, summary.getLength());
+    Assertions.assertEquals(2, summary.getDirectoryCount());
+    Assertions.assertEquals(2, summary.getFileCount());
+    Assertions.assertEquals(20, summary.getLength());
 
     cluster.getNameNodeRpc().delete(baz.toString(), false);
 
     summary = cluster.getNameNodeRpc().getContentSummary(
         foo.toString());
-    Assert.assertEquals(0, summary.getSnapshotDirectoryCount());
-    Assert.assertEquals(1, summary.getSnapshotFileCount());
-    Assert.assertEquals(20, summary.getSnapshotLength());
-    Assert.assertEquals(2, summary.getDirectoryCount());
-    Assert.assertEquals(2, summary.getFileCount());
-    Assert.assertEquals(30, summary.getLength());
+    Assertions.assertEquals(0, summary.getSnapshotDirectoryCount());
+    Assertions.assertEquals(1, summary.getSnapshotFileCount());
+    Assertions.assertEquals(20, summary.getSnapshotLength());
+    Assertions.assertEquals(2, summary.getDirectoryCount());
+    Assertions.assertEquals(2, summary.getFileCount());
+    Assertions.assertEquals(30, summary.getLength());
 
     final Path bazS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar/baz");
     try {
       cluster.getNameNodeRpc().getContentSummary(bazS1.toString());
-      Assert.fail("should get FileNotFoundException");
+      Assertions.fail("should get FileNotFoundException");
     } catch (FileNotFoundException ignored) {}
     cluster.getNameNodeRpc().rename(qux.toString(), "/temp/qux");
     summary = cluster.getNameNodeRpc().getContentSummary(
         foo.toString());
-    Assert.assertEquals(0, summary.getSnapshotDirectoryCount());
-    Assert.assertEquals(2, summary.getSnapshotFileCount());
-    Assert.assertEquals(30, summary.getSnapshotLength());
-    Assert.assertEquals(2, summary.getDirectoryCount());
-    Assert.assertEquals(2, summary.getFileCount());
-    Assert.assertEquals(30, summary.getLength());
+    Assertions.assertEquals(0, summary.getSnapshotDirectoryCount());
+    Assertions.assertEquals(2, summary.getSnapshotFileCount());
+    Assertions.assertEquals(30, summary.getSnapshotLength());
+    Assertions.assertEquals(2, summary.getDirectoryCount());
+    Assertions.assertEquals(2, summary.getFileCount());
+    Assertions.assertEquals(30, summary.getLength());
 
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
index 4f32af9..b5de4bb 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -49,9 +47,9 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test snapshot functionalities while file appending.
@@ -74,7 +72,7 @@
   DistributedFileSystem hdfs;
   FSDirectory fsdir;
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -87,7 +85,7 @@
     hdfs.mkdirs(dir);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
index 672f21a..6dc4200 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
@@ -26,16 +26,16 @@
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.
     DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 /**
  * Tests listSnapshot.
@@ -51,7 +48,7 @@
   FSNamesystem fsn;
   DistributedFileSystem hdfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
@@ -63,7 +60,7 @@
     hdfs.mkdirs(dir1);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 6756727..703bf18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.Random;
@@ -39,10 +39,10 @@
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /** Testing nested snapshots. */
 public class TestNestedSnapshots {
@@ -67,7 +67,7 @@
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem hdfs;
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.setInt(DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, SNAPSHOTLIMIT);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
@@ -76,7 +76,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -141,14 +141,14 @@
     cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(false);
     try {
       hdfs.allowSnapshot(rootPath);
-      Assert.fail();
+      Assertions.fail();
     } catch (SnapshotException se) {
       assertNestedSnapshotException(
           se, "subdirectory");
     }
     try {
       hdfs.allowSnapshot(foo);
-      Assert.fail();
+      Assertions.fail();
     } catch (SnapshotException se) {
       assertNestedSnapshotException(
           se, "subdirectory");
@@ -159,14 +159,14 @@
     hdfs.mkdirs(sub2Bar);
     try {
       hdfs.allowSnapshot(sub1Bar);
-      Assert.fail();
+      Assertions.fail();
     } catch (SnapshotException se) {
       assertNestedSnapshotException(
           se, "ancestor");
     }
     try {
       hdfs.allowSnapshot(sub2Bar);
-      Assert.fail();
+      Assertions.fail();
     } catch (SnapshotException se) {
       assertNestedSnapshotException(
           se, "ancestor");
@@ -174,9 +174,9 @@
   }
   
   static void assertNestedSnapshotException(SnapshotException se, String substring) {
-    Assert.assertTrue(se.getMessage().startsWith(
+    Assertions.assertTrue(se.getMessage().startsWith(
         "Nested snapshottable directories not allowed"));
-    Assert.assertTrue(se.getMessage().contains(substring));
+    Assertions.assertTrue(se.getMessage().contains(substring));
   }
 
   private static void print(String message) throws UnresolvedLinkException {
@@ -190,10 +190,10 @@
         new Path(s1, "bar/" + file.getName()),
         new Path(s2, file.getName())
     };
-    Assert.assertEquals(expected.length, paths.length);
+    Assertions.assertEquals(expected.length, paths.length);
     for(int i = 0; i < paths.length; i++) {
       final boolean computed = hdfs.exists(paths[i]);
-      Assert.assertEquals("Failed on " + paths[i], expected[i], computed);
+      Assertions.assertEquals(expected[i], computed, "Failed on " + paths[i]);
     }
   }
 
@@ -224,7 +224,7 @@
 
     try {
       hdfs.createSnapshot(dir, "s" + s);
-      Assert.fail("Expected to fail to create snapshot, but didn't.");
+      Assertions.fail("Expected to fail to create snapshot, but didn't.");
     } catch(IOException ioe) {
       SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
     }
@@ -235,7 +235,7 @@
       for(; s < SNAPSHOTLIMIT; s += RANDOM.nextInt(step)) {
         final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
         //the file #f exists in snapshot #s iff s > f.
-        Assert.assertEquals(s > f, hdfs.exists(p));
+        Assertions.assertEquals(s > f, hdfs.exists(p));
       }
     }
   }
@@ -260,13 +260,13 @@
       final Path snapshotPath = hdfs.createSnapshot(dir);
 
       //check snapshot path and the default snapshot name
-      final String snapshotName = snapshotPath.getName(); 
-      Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
-          "s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
-          snapshotName));
+      final String snapshotName = snapshotPath.getName();
+      Assertions.assertTrue(Pattern.matches(
+          "s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
+          snapshotName), "snapshotName=" + snapshotName);
       final Path parent = snapshotPath.getParent();
-      Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
-      Assert.assertEquals(dir, parent.getParent());
+      Assertions.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
+      Assertions.assertEquals(dir, parent.getParent());
     }
   }
 
@@ -287,18 +287,18 @@
       new Snapshot(2, "s2", snapshottable),
     };
 
-    Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
+    Assertions.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
     for(Snapshot s : snapshots) {
-      Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
-      Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
+      Assertions.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
+      Assertions.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
       
       for(Snapshot t : snapshots) {
         final int expected = s.getRoot().getLocalName().compareTo(
             t.getRoot().getLocalName());
         final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
-        Assert.assertEquals(expected > 0, computed > 0);
-        Assert.assertEquals(expected == 0, computed == 0);
-        Assert.assertEquals(expected < 0, computed < 0);
+        Assertions.assertEquals(expected > 0, computed > 0);
+        Assertions.assertEquals(expected == 0, computed == 0);
+        Assertions.assertEquals(expected < 0, computed < 0);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 04a3416..68f7dce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -44,10 +44,10 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestOpenFilesWithSnapshot {
   private static final Logger LOG =
@@ -62,7 +62,7 @@
   private static final long BUFFERLEN = BLOCKSIZE / 2;
   private static final long FILELEN = BLOCKSIZE * 2;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
@@ -72,7 +72,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -313,13 +313,13 @@
     final long hbaseFileLengthAfterS1 = fs.getFileStatus(hbaseFile).getLen();
 
     // Verify if Snap S1 file lengths are same as the the live ones
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS1,
+    Assertions.assertEquals(hbaseFileLengthAfterS1,
         fs.getFileStatus(hbaseS1Path).getLen());
-    Assert.assertEquals(appAFileInitialLength,
+    Assertions.assertEquals(appAFileInitialLength,
         fs.getFileStatus(appAFile).getLen());
-    Assert.assertEquals(appBFileInitialLength,
+    Assertions.assertEquals(appBFileInitialLength,
         fs.getFileStatus(appBFile).getLen());
 
     long flumeFileWrittenDataLength = flumeFileLengthAfterS1;
@@ -346,17 +346,17 @@
     // Verify live files lengths are same as all data written till now
     final long flumeFileLengthAfterS2 = fs.getFileStatus(flumeFile).getLen();
     final long hbaseFileLengthAfterS2 = fs.getFileStatus(hbaseFile).getLen();
-    Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
-    Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2);
+    Assertions.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
+    Assertions.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2);
 
     // Verify if Snap S2 file lengths are same as the live ones
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS2,
+    Assertions.assertEquals(hbaseFileLengthAfterS2,
         fs.getFileStatus(hbaseS2Path).getLen());
-    Assert.assertEquals(appAFileInitialLength,
+    Assertions.assertEquals(appAFileInitialLength,
         fs.getFileStatus(appAFile).getLen());
-    Assert.assertEquals(appBFileInitialLength,
+    Assertions.assertEquals(appBFileInitialLength,
         fs.getFileStatus(appBFile).getLen());
 
     // Write more data to appA file only
@@ -366,9 +366,9 @@
     appAFileWrittenDataLength += writeToStream(appAOutputStream, buf);
 
     // Verify other open files are not affected in their snapshots
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
-    Assert.assertEquals(appAFileWrittenDataLength,
+    Assertions.assertEquals(appAFileWrittenDataLength,
         fs.getFileStatus(appAFile).getLen());
 
     // Write more data to flume file only
@@ -388,35 +388,35 @@
     // Verify live files lengths are same as all data written till now
     final long flumeFileLengthAfterS3 = fs.getFileStatus(flumeFile).getLen();
     final long hbaseFileLengthAfterS3 = fs.getFileStatus(hbaseFile).getLen();
-    Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS3);
-    Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3);
+    Assertions.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS3);
+    Assertions.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3);
 
     // Verify if Snap S3 file lengths are same as the live ones
-    Assert.assertEquals(flumeFileLengthAfterS3,
+    Assertions.assertEquals(flumeFileLengthAfterS3,
         fs.getFileStatus(flumeS3Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS3,
+    Assertions.assertEquals(hbaseFileLengthAfterS3,
         fs.getFileStatus(hbaseS3Path).getLen());
-    Assert.assertEquals(appAFileWrittenDataLength,
+    Assertions.assertEquals(appAFileWrittenDataLength,
         fs.getFileStatus(appAFile).getLen());
-    Assert.assertEquals(appBFileInitialLength,
+    Assertions.assertEquals(appBFileInitialLength,
         fs.getFileStatus(appBFile).getLen());
 
     // Verify old flume snapshots have point-in-time / frozen file lengths
     // even after the live file have moved forward.
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS3,
+    Assertions.assertEquals(flumeFileLengthAfterS3,
         fs.getFileStatus(flumeS3Path).getLen());
 
     // Verify old hbase snapshots have point-in-time / frozen file lengths
     // even after the live files have moved forward.
-    Assert.assertEquals(hbaseFileLengthAfterS1,
+    Assertions.assertEquals(hbaseFileLengthAfterS1,
         fs.getFileStatus(hbaseS1Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS2,
+    Assertions.assertEquals(hbaseFileLengthAfterS2,
         fs.getFileStatus(hbaseS2Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS3,
+    Assertions.assertEquals(hbaseFileLengthAfterS3,
         fs.getFileStatus(hbaseS3Path).getLen());
 
     flumeOutputStream.close();
@@ -450,7 +450,7 @@
     final long flumeFileLengthAfterS1 = fs.getFileStatus(flumeFile).getLen();
 
     // Verify if Snap S1 file length is same as the the live one
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
 
     long flumeFileWrittenDataLength = flumeFileLengthAfterS1;
@@ -469,10 +469,10 @@
 
     // Verify live files length is same as all data written till now
     final long flumeFileLengthAfterS2 = fs.getFileStatus(flumeFile).getLen();
-    Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
+    Assertions.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
 
     // Verify if Snap S2 file length is same as the live one
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
 
     // Write more data to flume file
@@ -480,9 +480,9 @@
 
     // Verify old flume snapshots have point-in-time / frozen file lengths
     // even after the live file have moved forward.
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
 
     // Restart the NameNode
@@ -490,14 +490,14 @@
     cluster.waitActive();
 
     // Verify live file length hasn't changed after NN restart
-    Assert.assertEquals(flumeFileWrittenDataLength,
+    Assertions.assertEquals(flumeFileWrittenDataLength,
         fs.getFileStatus(flumeFile).getLen());
 
     // Verify old flume snapshots have point-in-time / frozen file lengths
     // after NN restart and live file moved forward.
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
 
     flumeOutputStream.close();
@@ -534,9 +534,9 @@
     final long hbaseFileLengthAfterS1 = fs.getFileStatus(hbaseFile).getLen();
 
     // Verify if Snap S1 file length is same as the the current versions
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS1,
+    Assertions.assertEquals(hbaseFileLengthAfterS1,
         fs.getFileStatus(hbaseS1Path).getLen());
 
     long flumeFileWrittenDataLength = flumeFileLengthAfterS1;
@@ -558,14 +558,14 @@
 
     // Verify current files length are same as all data written till now
     final long flumeFileLengthAfterS2 = fs.getFileStatus(flumeFile).getLen();
-    Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
+    Assertions.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
     final long hbaseFileLengthAfterS2 = fs.getFileStatus(hbaseFile).getLen();
-    Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2);
+    Assertions.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2);
 
     // Verify if Snap S2 file length is same as the current versions
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS2,
+    Assertions.assertEquals(hbaseFileLengthAfterS2,
         fs.getFileStatus(hbaseS2Path).getLen());
 
     // Write more data to open files
@@ -574,22 +574,22 @@
 
     // Verify old snapshots have point-in-time/frozen file
     // lengths even after the current versions have moved forward.
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         fs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         fs.getFileStatus(flumeS2Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS1,
+    Assertions.assertEquals(hbaseFileLengthAfterS1,
         fs.getFileStatus(hbaseS1Path).getLen());
-    Assert.assertEquals(hbaseFileLengthAfterS2,
+    Assertions.assertEquals(hbaseFileLengthAfterS2,
         fs.getFileStatus(hbaseS2Path).getLen());
 
     // Delete flume current file. Snapshots should
     // still have references to flume file.
     boolean flumeFileDeleted = fs.delete(flumeFile, true);
-    Assert.assertTrue(flumeFileDeleted);
-    Assert.assertFalse(fs.exists(flumeFile));
-    Assert.assertTrue(fs.exists(flumeS1Path));
-    Assert.assertTrue(fs.exists(flumeS2Path));
+    Assertions.assertTrue(flumeFileDeleted);
+    Assertions.assertFalse(fs.exists(flumeFile));
+    Assertions.assertTrue(fs.exists(flumeS1Path));
+    Assertions.assertTrue(fs.exists(flumeS2Path));
 
     SnapshotTestHelper.createSnapshot(fs, snapRootDir, "tmp_snap");
     fs.deleteSnapshot(snapRootDir, "tmp_snap");
@@ -597,14 +597,14 @@
     // Delete snap_2. snap_1 still has reference to
     // the flume file.
     fs.deleteSnapshot(snapRootDir, snap2Name);
-    Assert.assertFalse(fs.exists(flumeS2Path));
-    Assert.assertTrue(fs.exists(flumeS1Path));
+    Assertions.assertFalse(fs.exists(flumeS2Path));
+    Assertions.assertTrue(fs.exists(flumeS1Path));
 
     // Delete snap_1. Now all traces of flume file
     // is gone.
     fs.deleteSnapshot(snapRootDir, snap1Name);
-    Assert.assertFalse(fs.exists(flumeS2Path));
-    Assert.assertFalse(fs.exists(flumeS1Path));
+    Assertions.assertFalse(fs.exists(flumeS2Path));
+    Assertions.assertFalse(fs.exists(flumeS1Path));
 
     // Create Snapshot S3
     final Path snap3Dir = SnapshotTestHelper.createSnapshot(
@@ -613,7 +613,7 @@
 
     // Verify live files length is same as all data written till now
     final long hbaseFileLengthAfterS3 = fs.getFileStatus(hbaseFile).getLen();
-    Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3);
+    Assertions.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3);
 
     // Write more data to open files
     hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf);
@@ -621,9 +621,9 @@
     // Verify old snapshots have point-in-time/frozen file
     // lengths even after the flume open file is deleted and
     // the hbase live file has moved forward.
-    Assert.assertEquals(hbaseFileLengthAfterS3,
+    Assertions.assertEquals(hbaseFileLengthAfterS3,
         fs.getFileStatus(hbaseS3Path).getLen());
-    Assert.assertEquals(hbaseFileWrittenDataLength,
+    Assertions.assertEquals(hbaseFileWrittenDataLength,
         fs.getFileStatus(hbaseFile).getLen());
 
     hbaseOutputStream.close();
@@ -666,12 +666,12 @@
     // its output stream is still open.
     fs.delete(hbaseFile, true);
     fs.deleteSnapshot(snapRootDir, snap1Name);
-    Assert.assertFalse(fs.exists(hbaseFile));
+    Assertions.assertFalse(fs.exists(hbaseFile));
 
     // Verify file existence after the NameNode restart
     cluster.restartNameNode();
     cluster.waitActive();
-    Assert.assertFalse(fs.exists(hbaseFile));
+    Assertions.assertFalse(fs.exists(hbaseFile));
   }
 
   /**
@@ -770,7 +770,7 @@
     SnapshotTestHelper.createSnapshot(fs, snapRootDir, "test");
 
     t.join();
-    Assert.assertFalse("Client encountered writing error!", writerError.get());
+    Assertions.assertFalse(writerError.get(), "Client encountered writing error!");
 
     restartNameNode();
     cluster.waitActive();
@@ -807,9 +807,9 @@
     final Path hbaseS1Path = new Path(hbaseS1Dir, hbaseFileName);
     final FileChecksum hbaseFileCksumS1 = fs.getFileChecksum(hbaseS1Path);
 
-    // Verify if Snap S1 checksum is same as the current version one
-    Assert.assertEquals("Live and snap1 file checksum doesn't match!",
-        hbaseWALFileCksum0, fs.getFileChecksum(hbaseS1Path));
+    // Verify if Snap S1 checksum is same as the current version one
+    Assertions.assertEquals(hbaseWALFileCksum0, fs.getFileChecksum(hbaseS1Path),
+        "Live and snap1 file checksum doesn't match!");
 
     int newWriteLength = (int) (BLOCKSIZE * 1.5);
     byte[] buf = new byte[newWriteLength];
@@ -823,12 +823,12 @@
     final Path hbaseS2Path = new Path(hbaseS2Dir, hbaseFileName);
     final FileChecksum hbaseFileCksumS2 = fs.getFileChecksum(hbaseS2Path);
 
-    // Verify if the s1 checksum is still the same
-    Assert.assertEquals("Snap file checksum has changed!",
-        hbaseFileCksumS1, fs.getFileChecksum(hbaseS1Path));
-    // Verify if the s2 checksum is different from the s1 checksum
-    Assert.assertNotEquals("Snap1 and snap2 file checksum should differ!",
-        hbaseFileCksumS1, hbaseFileCksumS2);
+    // Verify if the s1 checksum is still the same
+    Assertions.assertEquals(hbaseFileCksumS1, fs.getFileChecksum(hbaseS1Path),
+        "Snap file checksum has changed!");
+    // Verify if the s2 checksum is different from the s1 checksum
+    Assertions.assertNotEquals(hbaseFileCksumS1, hbaseFileCksumS2,
+        "Snap1 and snap2 file checksum should differ!");
 
     newWriteLength = (int) (BLOCKSIZE * 2.5);
     buf = new byte[newWriteLength];
@@ -845,18 +845,18 @@
     hbaseOutputStream.close();
     final FileChecksum hbaseFileCksumBeforeTruncate =
         fs.getFileChecksum(hbaseFile);
-    Assert.assertEquals("Snap3 and before truncate file checksum should match!",
-        hbaseFileCksumBeforeTruncate, hbaseFileCksumS3);
+    Assertions.assertEquals(hbaseFileCksumBeforeTruncate, hbaseFileCksumS3,
+        "Snap3 and before truncate file checksum should match!");
 
     // Truncate the current file and record the after truncate checksum
     long currentFileLen = fs.getFileStatus(hbaseFile).getLen();
     boolean fileTruncated = fs.truncate(hbaseFile, currentFileLen / 2);
-    Assert.assertTrue("File truncation failed!", fileTruncated);
+    Assertions.assertTrue(fileTruncated, "File truncation failed!");
     final FileChecksum hbaseFileCksumAfterTruncate =
         fs.getFileChecksum(hbaseFile);
 
-    Assert.assertNotEquals("Snap3 and after truncate checksum shouldn't match!",
-        hbaseFileCksumS3, hbaseFileCksumAfterTruncate);
+    Assertions.assertNotEquals(hbaseFileCksumS3, hbaseFileCksumAfterTruncate,
+        "Snap3 and after truncate checksum shouldn't match!");
 
     // Append more data to the current file
     hbaseOutputStream = fs.append(hbaseFile);
@@ -876,13 +876,13 @@
     final FileChecksum hbaseFileCksumAfterAppend =
         fs.getFileChecksum(hbaseFile);
 
-    Assert.assertEquals("Snap4 and after append file checksum should match!",
-        hbaseFileCksumAfterAppend, hbaseFileCksumS4);
+    Assertions.assertEquals(hbaseFileCksumAfterAppend, hbaseFileCksumS4,
+        "Snap4 and after append file checksum should match!");
 
     // Recompute checksum for S3 path and verify it has not changed
     hbaseFileCksumS3 = fs.getFileChecksum(hbaseS3Path);
-    Assert.assertEquals("Snap3 and before truncate file checksum should match!",
-        hbaseFileCksumBeforeTruncate, hbaseFileCksumS3);
+    Assertions.assertEquals(hbaseFileCksumBeforeTruncate, hbaseFileCksumS3,
+        "Snap3 and before truncate file checksum should match!");
   }
 
   private Path createSnapshot(Path snapRootDir, String snapName,
@@ -895,7 +895,7 @@
   private void verifyFileSize(long fileSize, Path... filePaths) throws
       IOException {
     for (Path filePath : filePaths) {
-      Assert.assertEquals(fileSize, fs.getFileStatus(filePath).getLen());
+      Assertions.assertEquals(fileSize, fs.getFileStatus(filePath).getLen());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletion.java
index 8c2c61c..c3c7705 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletion.java
@@ -28,10 +28,10 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -40,7 +40,7 @@
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SNAPSHOT_DELETED;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test ordered snapshot deletion.
@@ -53,7 +53,7 @@
 
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     final Configuration conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
@@ -62,7 +62,7 @@
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -106,28 +106,28 @@
     final Path snapPathNew =
         SnapshotTestHelper.getSnapshotRoot(snapshottableDir, snapName);
     // Check if the path exists
-    Assert.assertNotNull(cluster.getFileSystem().getFileStatus(snapPathNew));
+    Assertions.assertNotNull(cluster.getFileSystem().getFileStatus(snapPathNew));
 
     // Check xAttr for snapshotRoot
     final INode inode = cluster.getNamesystem().getFSDirectory()
         .getINode(snapPathNew.toString());
     final XAttrFeature f = inode.getXAttrFeature();
     final XAttr xAttr = f.getXAttr(XATTR_SNAPSHOT_DELETED);
-    Assert.assertNotNull(xAttr);
-    Assert.assertEquals(XATTR_SNAPSHOT_DELETED.substring("system.".length()),
+    Assertions.assertNotNull(xAttr);
+    Assertions.assertEquals(XATTR_SNAPSHOT_DELETED.substring("system.".length()),
         xAttr.getName());
-    Assert.assertEquals(XAttr.NameSpace.SYSTEM, xAttr.getNameSpace());
-    Assert.assertNull(xAttr.getValue());
+    Assertions.assertEquals(XAttr.NameSpace.SYSTEM, xAttr.getNameSpace());
+    Assertions.assertNull(xAttr.getValue());
 
     // Check inode
-    Assert.assertTrue(inode instanceof Snapshot.Root);
-    Assert.assertTrue(((Snapshot.Root) inode).isMarkedAsDeleted());
+    Assertions.assertTrue(inode instanceof Snapshot.Root);
+    Assertions.assertTrue(((Snapshot.Root) inode).isMarkedAsDeleted());
   }
 
   static void assertNotMarkedAsDeleted(Path snapshotRoot,
       MiniDFSCluster cluster) throws IOException {
     // Check if the path exists
-    Assert.assertNotNull(cluster.getFileSystem().getFileStatus(snapshotRoot));
+    Assertions.assertNotNull(cluster.getFileSystem().getFileStatus(snapshotRoot));
 
     // Check xAttr for snapshotRoot
     final INode inode = cluster.getNamesystem().getFSDirectory()
@@ -135,12 +135,12 @@
     final XAttrFeature f = inode.getXAttrFeature();
     if (f != null) {
       final XAttr xAttr = f.getXAttr(XATTR_SNAPSHOT_DELETED);
-      Assert.assertNull(xAttr);
+      Assertions.assertNull(xAttr);
     }
 
     // Check inode
-    Assert.assertTrue(inode instanceof Snapshot.Root);
-    Assert.assertFalse(((Snapshot.Root)inode).isMarkedAsDeleted());
+    Assertions.assertTrue(inode instanceof Snapshot.Root);
+    Assertions.assertFalse(((Snapshot.Root)inode).isMarkedAsDeleted());
   }
 
   void assertXAttrSet(String snapshot,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletionGc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletionGc.java
index 5c3aa8f..b0dca34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletionGc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOrderedSnapshotDeletionGc.java
@@ -29,10 +29,10 @@
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 import java.io.File;
@@ -51,8 +51,8 @@
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.TestOrderedSnapshotDeletion.assertMarkedAsDeleted;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.TestOrderedSnapshotDeletion.assertNotMarkedAsDeleted;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.TestOrderedSnapshotDeletion.getDeletedSnapshotName;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test {@link SnapshotDeletionGc}.
@@ -62,7 +62,7 @@
   private static final int NUM_DATANODES = 0;
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     final Configuration conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
@@ -75,7 +75,7 @@
     GenericTestUtils.setLogLevel(SnapshotDeletionGc.LOG, Level.TRACE);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -94,17 +94,17 @@
     final Path sub0 = new Path(snapshottableDir, "sub0");
     hdfs.mkdirs(sub0);
     final Path s0path = hdfs.createSnapshot(snapshottableDir, "s0");
-    Assert.assertTrue(exist(s0path, hdfs));
+    Assertions.assertTrue(exist(s0path, hdfs));
 
     final Path sub1 = new Path(snapshottableDir, "sub1");
     hdfs.mkdirs(sub1);
     final Path s1path = hdfs.createSnapshot(snapshottableDir, "s1");
-    Assert.assertTrue(exist(s1path, hdfs));
+    Assertions.assertTrue(exist(s1path, hdfs));
 
     final Path sub2 = new Path(snapshottableDir, "sub2");
     hdfs.mkdirs(sub2);
     final Path s2path = hdfs.createSnapshot(snapshottableDir, "s2");
-    Assert.assertTrue(exist(s2path, hdfs));
+    Assertions.assertTrue(exist(s2path, hdfs));
 
     assertNotMarkedAsDeleted(s0path, cluster);
     assertNotMarkedAsDeleted(s1path, cluster);
@@ -116,9 +116,9 @@
     assertMarkedAsDeleted(s2path, snapshottableDir, cluster);
     final Path s2pathNew = new Path(s2path.getParent(),
         getDeletedSnapshotName(hdfs, snapshottableDir, s2path.getName()));
-    Assert.assertFalse(exist(s2path, hdfs));
-    Assert.assertTrue(exist(s2pathNew, hdfs));
-    Assert.assertFalse(s2path.equals(s2pathNew));
+    Assertions.assertFalse(exist(s2path, hdfs));
+    Assertions.assertTrue(exist(s2pathNew, hdfs));
+    Assertions.assertFalse(s2path.equals(s2pathNew));
 
     hdfs.deleteSnapshot(snapshottableDir, "s1");
     assertNotMarkedAsDeleted(s0path, cluster);
@@ -126,9 +126,9 @@
     assertMarkedAsDeleted(s2path, snapshottableDir, cluster);
     final Path s1pathNew = new Path(s1path.getParent(),
         getDeletedSnapshotName(hdfs, snapshottableDir, s1path.getName()));
-    Assert.assertFalse(exist(s1path, hdfs));
-    Assert.assertTrue(exist(s1pathNew, hdfs));
-    Assert.assertFalse(s1path.equals(s1pathNew));
+    Assertions.assertFalse(exist(s1path, hdfs));
+    Assertions.assertTrue(exist(s1pathNew, hdfs));
+    Assertions.assertFalse(s1path.equals(s1pathNew));
     // should not be gc'ed
     Thread.sleep(10*GC_PERIOD);
     assertNotMarkedAsDeleted(s0path, cluster);
@@ -136,7 +136,7 @@
     assertMarkedAsDeleted(s2path, snapshottableDir, cluster);
 
     hdfs.deleteSnapshot(snapshottableDir, "s0");
-    Assert.assertFalse(exist(s0path, hdfs));
+    Assertions.assertFalse(exist(s0path, hdfs));
 
     waitForGc(Arrays.asList(s1pathNew, s2pathNew), hdfs);
     // total no of edit log records created for delete snapshot will be equal
@@ -155,7 +155,7 @@
     cluster.shutdown();
 
     File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
-    assertTrue("Should exist: " + editFile, editFile.exists());
+    assertTrue(editFile.exists(), "Should exist: " + editFile);
     EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
     counts = FSImageTestUtil.countEditLogOpTypes(editFile);
     if (editLogOpCount > 0) {
@@ -169,7 +169,7 @@
         .build();
     cluster.waitActive();
     // ensure after the edits get replayed , all the snapshots are deleted
-    Assert.assertEquals(0,
+    Assertions.assertEquals(0,
         cluster.getNamesystem().getSnapshotManager().getNumSnapshots());
   }
 
@@ -235,7 +235,7 @@
       hdfs.mkdirs(sub);
       final Path p = hdfs.createSnapshot(snapshottableDir, "s" + i);
       snapshotPaths.add(p);
-      Assert.assertTrue(exist(p, hdfs));
+      Assertions.assertTrue(exist(p, hdfs));
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java
index 662957f..f0cc924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRandomOpsWithSnapshots.java
@@ -31,9 +31,9 @@
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,10 +47,10 @@
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Testing random FileSystem operations with random Snapshot operations.
@@ -178,7 +175,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     CONFIG.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
     cluster = new MiniDFSCluster.Builder(CONFIG).numDataNodes(REPL).
@@ -190,7 +187,7 @@
     hdfs.mkdirs(WITNESSDIR);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -283,7 +280,7 @@
           break;
 
         default:
-          assertNull("Invalid FileSystem operation", fsOperation);
+          assertNull(fsOperation, "Invalid FileSystem operation");
           break;
         }
       }
@@ -308,7 +305,7 @@
           break;
 
         default:
-          assertNull("Invalid Snapshot operation", snapshotOperation);
+          assertNull(snapshotOperation, "Invalid Snapshot operation");
           break;
         }
       }
@@ -331,7 +328,7 @@
               TESTDIRSTRING, WITNESSDIRSTRING));
         }
         hdfs.mkdirs(newDir);
-        assertTrue("Directory exists", hdfs.exists(newDir));
+        assertTrue(hdfs.exists(newDir), "Directory exists");
         LOG.info("Directory created: " + newDir);
         numberDirectoryCreated++;
       }
@@ -353,7 +350,7 @@
                 TESTDIRSTRING, WITNESSDIRSTRING));
           }
           hdfs.delete(deleteDir, true);
-          assertFalse("Directory does not exist", hdfs.exists(deleteDir));
+          assertFalse(hdfs.exists(deleteDir), "Directory does not exist");
           if (!isWitnessDir) {
             snapshottableDirectories.remove(deleteDir);
           }
@@ -380,9 +377,9 @@
                 TESTDIRSTRING, WITNESSDIRSTRING));
           }
           hdfs.rename(oldDir, newDir, Options.Rename.OVERWRITE);
-          assertTrue("Target directory exists", hdfs.exists(newDir));
-          assertFalse("Source directory does not exist",
-              hdfs.exists(oldDir));
+          assertTrue(hdfs.exists(newDir), "Target directory exists");
+          assertFalse(hdfs.exists(oldDir),
+              "Source directory does not exist");
 
           if (dir == OperationDirectories.TestDir) {
             snapshottableDirectories.remove(oldDir);
@@ -482,7 +479,7 @@
               TESTDIRSTRING, WITNESSDIRSTRING));
         }
         hdfs.createNewFile(newFile);
-        assertTrue("File exists", hdfs.exists(newFile));
+        assertTrue(hdfs.exists(newFile), "File exists");
         LOG.info("createTestFile, file created: " + newFile);
         numberFileCreated++;
       }
@@ -505,8 +502,8 @@
                   TESTDIRSTRING, WITNESSDIRSTRING));
             }
             hdfs.delete(deleteFile, false);
-            assertFalse("File does not exists",
-                hdfs.exists(deleteFile));
+            assertFalse(hdfs.exists(deleteFile),
+                "File does not exist");
             LOG.info("deleteTestFile, file deleted: " + deleteFile);
             numberFileDeleted++;
           }
@@ -536,8 +533,8 @@
             }
 
             hdfs.rename(oldFile, newFile, Options.Rename.OVERWRITE);
-            assertTrue("Target file exists", hdfs.exists(newFile));
-            assertFalse("Source file does not exist", hdfs.exists(oldFile));
+            assertTrue(hdfs.exists(newFile), "Target file exists");
+            assertFalse(hdfs.exists(oldFile), "Source file does not exist");
             LOG.info("Renamed file: " + oldFile + " to file: " + newFile);
             numberFileRenamed++;
           }
@@ -597,9 +594,9 @@
       }
     }, 10, 100000);
 
-    assertTrue("NameNode is up", cluster.getNameNode().isActiveState());
-    assertTrue("DataNode is up and running", cluster.isDataNodeUp());
-    assertTrue("Cluster is up and running", cluster.isClusterUp());
+    assertTrue(cluster.getNameNode().isActiveState(), "NameNode is up");
+    assertTrue(cluster.isDataNodeUp(), "DataNode is up and running");
+    assertTrue(cluster.isClusterUp(), "Cluster is up and running");
     LOG.info("checkClusterHealth, cluster is healthy.");
 
     printOperationStats();
@@ -639,14 +636,14 @@
       }
       filename += "file" + i;
       createFile(filename, fileLength, true);
-      assertTrue("Test file created", hdfs.exists(new Path(filename)));
+      assertTrue(hdfs.exists(new Path(filename)), "Test file created");
       LOG.info("createFiles, file: " + filename + "was created");
 
       String witnessFile =
           filename.replaceAll(TESTDIRSTRING, WITNESSDIRSTRING);
       createFile(witnessFile, fileLength, false);
-      assertTrue("Witness file exists",
-          hdfs.exists(new Path(witnessFile)));
+      assertTrue(hdfs.exists(new Path(witnessFile)),
+          "Witness file exists");
       LOG.info("createFiles, file: " + witnessFile + "was created");
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java
index 052610b..e17670e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java
@@ -22,16 +22,17 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 import java.io.IOException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
 import static org.apache.hadoop.hdfs.server.namenode.FSNamesystem.DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED;
+
 /**
  * Test Rename with ordered snapshot deletion.
  */
@@ -41,7 +42,7 @@
   private DistributedFileSystem hdfs;
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     final Configuration conf = new Configuration();
     conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
@@ -52,7 +53,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -101,9 +102,9 @@
   private void validateRename(Path src, Path dest) {
     try {
       hdfs.rename(src, dest);
-      Assert.fail("Expected exception not thrown.");
+      Assertions.fail("Expected exception not thrown.");
     } catch (IOException ioe) {
-      Assert.assertTrue(ioe.getMessage().contains("are not under the" +
+      Assertions.assertTrue(ioe.getMessage().contains("are not under the" +
           " same snapshot root."));
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 3bfe971..fce57d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -41,10 +41,10 @@
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.io.File;
@@ -54,11 +54,11 @@
 import java.util.List;
 import java.util.Random;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyInt;
@@ -99,7 +95,7 @@
     assertEquals(deletedSize, diff.getDeletedUnmodifiable().size());
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).format(true)
@@ -112,7 +108,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -149,20 +145,20 @@
     
     final INode fooRef = fsdir.getINode(
         SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
-    Assert.assertTrue(fooRef.isReference());
-    Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
+    Assertions.assertTrue(fooRef.isReference());
+    Assertions.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
 
     final INodeReference.WithCount withCount
         = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
-    Assert.assertEquals(2, withCount.getReferenceCount());
+    Assertions.assertEquals(2, withCount.getReferenceCount());
 
     final INode barRef = fsdir.getINode(bar.toString());
-    Assert.assertTrue(barRef.isReference());
+    Assertions.assertTrue(barRef.isReference());
 
-    Assert.assertSame(withCount, barRef.asReference().getReferredINode());
+    Assertions.assertSame(withCount, barRef.asReference().getReferredINode());
     
     hdfs.delete(bar, false);
-    Assert.assertEquals(1, withCount.getReferenceCount());
+    Assertions.assertEquals(1, withCount.getReferenceCount());
   }
   
   private static boolean existsInDiffReport(List<DiffReportEntry> entries,
@@ -344,10 +340,10 @@
             .asReference();
     INodeReference.WithCount withCount = (WithCount) ref
             .getReferredINode();
-    Assert.assertEquals(withCount.getReferenceCount(), 1);
+    Assertions.assertEquals(withCount.getReferenceCount(), 1);
     // Ensure name list is empty for the reference sub3file3Inode
-    Assert.assertNull(withCount.getLastWithName());
-    Assert.assertTrue(sub3file3Inode.isInCurrentState());
+    Assertions.assertNull(withCount.getLastWithName());
+    Assertions.assertTrue(sub3file3Inode.isInCurrentState());
   }
 
   /**
@@ -2207,8 +2203,8 @@
     
     final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
         "dir2/foo");
-    assertTrue("the snapshot path " + foo_s0 + " should exist",
-        hdfs.exists(foo_s0));
+    assertTrue(hdfs.exists(foo_s0),
+        "the snapshot path " + foo_s0 + " should exist");
     
     // delete snapshot s0. The deletion will first go down through dir1, and 
     // find foo in the created list of dir1. Then it will use null as the prior
@@ -2216,13 +2212,13 @@
     // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
     // foo subtree.
     hdfs.deleteSnapshot(test, "s0");
-    // check the internal
-    assertFalse("after deleting s0, " + foo_s0 + " should not exist",
-        hdfs.exists(foo_s0));
+    // check the internal
+    assertFalse(hdfs.exists(foo_s0),
+        "after deleting s0, " + foo_s0 + " should not exist");
     INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
         .asDirectory();
-    assertTrue("the diff list of " + dir2
-        + " should be empty after deleting s0", !dir2Node.isWithSnapshot());
+    assertTrue(!dir2Node.isWithSnapshot(), "the diff list of " + dir2
+        + " should be empty after deleting s0");
     
     assertTrue(hdfs.exists(newfoo));
     INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
index 2459b78..f4cc982 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.List;
 
@@ -38,11 +35,9 @@
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestSetQuotaWithSnapshot {
   protected static final long seed = 0;
@@ -54,11 +49,8 @@
   protected FSNamesystem fsn;
   protected FSDirectory fsdir;
   protected DistributedFileSystem hdfs;
-  
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -71,7 +63,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapRootDescendantDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapRootDescendantDiff.java
index d2be71d..357f49b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapRootDescendantDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapRootDescendantDiff.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
+
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,15 +26,15 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test snapshot diff report for the snapshot root descendant directory.
  */
 public class TestSnapRootDescendantDiff extends TestSnapshotDiffReport {
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(
@@ -51,7 +52,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 0a262f8..4887915 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -17,12 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -61,11 +56,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This class tests snapshot functionality. One or multiple snapshots are
@@ -100,9 +93,6 @@
   
   private static final String testDir =
       GenericTestUtils.getTestDir().getAbsolutePath();
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
   
   /**
    * The list recording all previous snapshots. Each element in the array
@@ -114,7 +104,7 @@
    */
   private TestDirectoryTree dirTree;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -128,7 +118,7 @@
     dirTree = new TestDirectoryTree(DIRECTORY_TREE_LEVEL, hdfs);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -256,7 +246,7 @@
     File originalFsimage = FSImageTestUtil.findLatestImageFile(
         FSImageTestUtil.getFSImage(
         cluster.getNameNode()).getStorage().getStorageDir(0));
-    assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
+    assertNotNull(originalFsimage, "Didn't generate or can't find fsimage");
     PrintStream o = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
     PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
     v.visit(new RandomAccessFile(originalFsimage, "r"));
@@ -749,7 +739,7 @@
             
             SnapshotTestHelper.dumpTree(s, cluster);
           }
-          assertEquals(s, currentStatus.toString(), originalStatus.toString());
+          assertEquals(currentStatus.toString(), originalStatus.toString(), s);
         }
       }
     }
@@ -860,7 +850,7 @@
               + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
           SnapshotTestHelper.dumpTree(s, cluster);
         }
-        assertEquals(s, originalSnapshotFileLen, currentSnapshotFileLen);
+        assertEquals(originalSnapshotFileLen, currentSnapshotFileLen, s);
         // Read the snapshot file out of the boundary
         if (currentSnapshotFileLen != -1L
             && !(this instanceof FileAppendNotClose)) {
@@ -875,7 +865,7 @@
                 + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
             SnapshotTestHelper.dumpTree(s, cluster);
           }
-          assertEquals(s, -1, readLen);
+          assertEquals(-1, readLen, s);
           input.close();
         }
       }
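
The hunks above all apply the same mechanical change: the JUnit 4 assertions took the failure message as the first argument, while the org.junit.jupiter.api.Assertions overloads take it as the trailing argument (a String or a Supplier<String>). A minimal sketch of the pattern; the class name and values below are illustrative only and are not part of this patch:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    public class ArgumentOrderExample {
      @Test
      public void messageMovesToTheLastPosition() {
        int expected = 1;
        int actual = 1;
        // JUnit 4 form: assertEquals("unexpected count", expected, actual);
        // JUnit 5 form: expected and actual first, message (or Supplier) last.
        assertEquals(expected, actual, "unexpected count");
      }
    }
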
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index ff45c0a..7fbcaae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -19,11 +19,7 @@
 
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 
@@ -43,10 +39,10 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test cases for snapshot-related information in blocksMap.
@@ -66,7 +62,7 @@
   BlockManager blockmanager;
   protected DistributedFileSystem hdfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -80,7 +76,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -100,8 +96,8 @@
 
   static void assertBlockCollection(final BlockManager blkManager,
       final INodeFile file, final BlockInfo b) {
-    Assert.assertSame(b, blkManager.getStoredBlock(b));
-    Assert.assertEquals(file.getId(), b.getBlockCollectionId());
+    Assertions.assertSame(b, blkManager.getStoredBlock(b));
+    Assertions.assertEquals(file.getId(), b.getBlockCollectionId());
   }
 
   /**
@@ -150,7 +146,7 @@
     {
       INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
           blockmanager);
-      Assert.assertSame(INodeFile.class, f1.getClass());
+      Assertions.assertSame(INodeFile.class, f1.getClass());
       hdfs.setReplication(file1, (short)2);
       f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
       assertTrue(f1.isWithSnapshot());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index b85530c..3f96467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
@@ -59,11 +59,11 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -93,7 +93,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
@@ -106,7 +106,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -210,15 +210,15 @@
     INodeDirectory dirNode = getDir(fsdir, dirPath);
     assertTrue(dirNode.isQuotaSet());
     QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
-    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
-        q.getNameSpace());
-    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
-        q.getStorageSpace());
+    assertEquals(expectedNs, q.getNameSpace(),
+        dirNode.dumpTreeRecursively().toString());
+    assertEquals(expectedDs, q.getStorageSpace(),
+        dirNode.dumpTreeRecursively().toString());
     QuotaCounts counts = dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), false);
-    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
-        counts.getNameSpace());
-    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
-        counts.getStorageSpace());
+    assertEquals(expectedNs, counts.getNameSpace(),
+        dirNode.dumpTreeRecursively().toString());
+    assertEquals(expectedDs, counts.getStorageSpace(),
+        dirNode.dumpTreeRecursively().toString());
   }
   
   /**
@@ -1232,13 +1232,13 @@
 
     // make sure bar has been removed from its parent
     INode p = fsdir.getInode(parentId);
-    Assert.assertNotNull(p);
+    Assertions.assertNotNull(p);
     INodeDirectory pd = p.asDirectory();
-    Assert.assertNotNull(pd);
-    Assert.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));
+    Assertions.assertNotNull(pd);
+    Assertions.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));
 
     // make sure bar has been cleaned from inodeMap
-    Assert.assertNull(fsdir.getInode(fileId));
+    Assertions.assertNull(fsdir.getInode(fileId));
   }
 
   @Test
@@ -1342,10 +1342,10 @@
 
       SnapshotDiffReport sdr = hdfs.getSnapshotDiffReport(st, "s" + i, "ss");
       LOG.info("Snapshot Diff s{} to ss : {}", i, sdr);
-      Assert.assertEquals(sdr.getDiffList().size(), 1);
-      Assert.assertTrue(sdr.getDiffList().get(0).getType() ==
+      Assertions.assertEquals(sdr.getDiffList().size(), 1);
+      Assertions.assertTrue(sdr.getDiffList().get(0).getType() ==
           SnapshotDiffReport.DiffType.MODIFY);
-      Assert.assertTrue(new Path(st, DFSUtilClient.bytes2String(
+      Assertions.assertTrue(new Path(st, DFSUtilClient.bytes2String(
           sdr.getDiffList().get(0).getSourcePath())).equals(dest));
     }
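
TestSnapshotDeletion, like TestSnapshotRename and TestXAttrWithSnapshot further below, still declares the JUnit 4 @Rule ExpectedException alongside the new Jupiter imports; the Jupiter engine does not apply JUnit 4 rules, and the usual JUnit 5 replacement is Assertions.assertThrows. A minimal sketch of that replacement, using a hypothetical helper rather than any HDFS code from this patch:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    public class ExpectedExceptionMigrationExample {
      @Test
      public void deletingMissingSnapshotFails() {
        // Instead of exception.expect(...), JUnit 5 wraps the failing call in
        // assertThrows and hands back the thrown exception for inspection.
        IllegalStateException e = assertThrows(IllegalStateException.class,
            () -> deleteSnapshot("does-not-exist"));
        assertTrue(e.getMessage().contains("does-not-exist"));
      }

      // Hypothetical stand-in for the operation under test.
      private void deleteSnapshot(String name) {
        throw new IllegalStateException("Cannot delete snapshot " + name);
      }
    }
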
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
index efe59ca..baef5a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.text.SimpleDateFormat;
@@ -56,11 +53,11 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,7 +82,7 @@
   protected DistributedFileSystem hdfs;
   private final HashMap<Path, Integer> snapshotNumberMap = new HashMap<Path, Integer>();
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setBoolean(
@@ -104,7 +101,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -317,7 +314,7 @@
 
   @Test(timeout = 60000)
   public void testSnapRootDescendantDiffReport() throws Exception {
-    Assume.assumeTrue(conf.getBoolean(
+    Assumptions.assumeTrue(conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
         DFSConfigKeys.
             DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT_DEFAULT));
@@ -578,7 +575,7 @@
 
   @Test
   public void testSnapRootDescendantDiffReportWithRename() throws Exception {
-    Assume.assumeTrue(conf.getBoolean(
+    Assumptions.assumeTrue(conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT,
         DFSConfigKeys.
             DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT_DEFAULT));
@@ -824,7 +821,7 @@
     final SnapshotDiffReport report =
         hdfs.getSnapshotDiffReport(testdir, "s0", "");
     // The diff should be null. Snapshot dir inode should keep the quota.
-    Assert.assertEquals(0, report.getDiffList().size());
+    Assertions.assertEquals(0, report.getDiffList().size());
     // Cleanup
     hdfs.deleteSnapshot(testdir, "s0");
     hdfs.disallowSnapshot(testdir);
@@ -1075,7 +1072,7 @@
     final long flumeFileLengthAfterS1 = hdfs.getFileStatus(flumeFile).getLen();
 
     // Verify if Snap S1 file length is same as the the live one
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         hdfs.getFileStatus(flumeS1Path).getLen());
 
     verifyDiffReport(level0A, flumeSnap1Name, "",
@@ -1097,10 +1094,10 @@
 
     // Verify live files length is same as all data written till now
     final long flumeFileLengthAfterS2 = hdfs.getFileStatus(flumeFile).getLen();
-    Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
+    Assertions.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
 
     // Verify if Snap S2 file length is same as the live one
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         hdfs.getFileStatus(flumeS2Path).getLen());
 
     verifyDiffReport(level0A, flumeSnap1Name, "",
@@ -1120,22 +1117,22 @@
 
     // Verify old flume snapshots have point-in-time / frozen file lengths
     // even after the live file have moved forward.
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         hdfs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         hdfs.getFileStatus(flumeS2Path).getLen());
 
     flumeOutputStream.close();
 
     // Verify if Snap S2 file length is same as the live one
-    Assert.assertEquals(flumeFileWrittenDataLength,
+    Assertions.assertEquals(flumeFileWrittenDataLength,
         hdfs.getFileStatus(flumeFile).getLen());
 
     // Verify old flume snapshots have point-in-time / frozen file lengths
     // even after the live file have moved forward.
-    Assert.assertEquals(flumeFileLengthAfterS1,
+    Assertions.assertEquals(flumeFileLengthAfterS1,
         hdfs.getFileStatus(flumeS1Path).getLen());
-    Assert.assertEquals(flumeFileLengthAfterS2,
+    Assertions.assertEquals(flumeFileLengthAfterS2,
         hdfs.getFileStatus(flumeS2Path).getLen());
 
     verifyDiffReport(level0A, flumeSnap1Name, "",
@@ -1554,10 +1551,10 @@
     try {
       iterator.next();
     } catch (Exception e) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           e.getMessage().contains("No more entry in SnapshotDiffReport for /"));
     }
-    Assert.assertNotEquals(0, reportList.size());
+    Assertions.assertNotEquals(0, reportList.size());
     // generate the snapshotDiffReport and Verify
     snapshotDiffReport = new SnapshotDiffReportGenerator("/", "s0", "s1",
         report.getIsFromEarlier(), modifiedList, createdList, deletedList);
@@ -1598,7 +1595,7 @@
     try {
       hdfs.snapshotDiffReportListingRemoteIterator(root, "s0", "");
     } catch (Exception e) {
-      Assert.assertTrue(e.getMessage().contains("Remote Iterator is"
+      Assertions.assertTrue(e.getMessage().contains("Remote Iterator is"
           + "supported for snapshotDiffReport between two snapshots"));
     }
   }
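
The Assume.assumeTrue calls above become org.junit.jupiter.api.Assumptions.assumeTrue; when the assumption does not hold, Jupiter aborts the test and reports it as skipped rather than failed. A minimal sketch, with a hypothetical configuration flag in place of the HDFS keys used in this patch:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    import org.junit.jupiter.api.Test;

    public class AssumptionExample {
      // Hypothetical flag standing in for a configuration lookup.
      private static final boolean FEATURE_ENABLED =
          Boolean.getBoolean("example.feature.enabled");

      @Test
      public void runsOnlyWhenFeatureIsEnabled() {
        // Reported as skipped, not failed, when the assumption is false.
        assumeTrue(FEATURE_ENABLED, "feature is disabled in this configuration");
        // ... assertions against the feature would follow here ...
      }
    }
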
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
index 814da03..44ec970 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
@@ -26,15 +26,14 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -60,7 +59,7 @@
   private final String file1Name = "file1";
   private final String snapshot1 = "snapshot1";
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCKSIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCKSIZE);
@@ -70,7 +69,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -194,10 +193,10 @@
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
+    assertEquals(BLOCKSIZE * 2, fileStatus.getLen(), "Unexpected file length");
     fis = hdfs.open(file1);
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
+    assertEquals(BLOCKSIZE * 2, bytesRead, "Unexpected # bytes read");
     fis.close();
 
     Path file1snap1 =
@@ -207,7 +206,7 @@
     assertEquals(fileStatus.getLen(), BLOCKSIZE);
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
+    assertEquals(BLOCKSIZE, bytesRead, "Unexpected # bytes read");
     fis.close();
 
     PrintStream outBackup = System.out;
@@ -220,7 +219,7 @@
     try {
       ToolRunner.run(conf, shell, new String[] { "-cat",
       "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
-      assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
+      assertEquals(BLOCKSIZE, bao.size(), "Unexpected # bytes from -cat");
     } finally {
       System.setOut(outBackup);
       System.setErr(errBackup);
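
TestSnapshotFileLength also switches assertThat to org.hamcrest.MatcherAssert, since org.junit.jupiter.api.Assertions provides no assertThat. A minimal sketch of Hamcrest matchers used under Jupiter; the class name and lengths below are illustrative, not taken from this patch:

    import static org.hamcrest.CoreMatchers.is;
    import static org.hamcrest.CoreMatchers.not;
    import static org.hamcrest.MatcherAssert.assertThat;

    import org.junit.jupiter.api.Test;

    public class HamcrestWithJupiterExample {
      @Test
      public void snapshotLengthStaysFrozen() {
        long snapshotLength = 1024L;  // illustrative values
        long liveLength = 2048L;
        // MatcherAssert.assertThat replaces the deprecated JUnit 4 assertThat.
        assertThat(snapshotLength, is(not(liveLength)));
      }
    }
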
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
index 03a2ff4..121f025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -29,9 +29,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestSnapshotListing {
 
@@ -46,7 +46,7 @@
   FSNamesystem fsn;
   DistributedFileSystem hdfs;
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
@@ -57,7 +57,7 @@
     hdfs.mkdirs(dir);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
index af7ed18..c1df41a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
@@ -37,8 +37,8 @@
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 
@@ -98,9 +98,9 @@
     try {
       sm.createSnapshot(leaseManager, iip, "dummy", "shouldFailSnapshot",
           Time.now());
-      Assert.fail("Expected SnapshotException not thrown");
+      Assertions.fail("Expected SnapshotException not thrown");
     } catch (SnapshotException se) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           StringUtils.toLowerCase(se.getMessage()).contains(errMsg));
     }
 
@@ -118,10 +118,10 @@
       // in case the snapshot ID limit is hit, further creation of snapshots
       // even post deletions of snapshots won't succeed
       if (maxSnapID < maxSnapshotLimit) {
-        Assert.fail("CreateSnapshot should succeed");
+        Assertions.fail("CreateSnapshot should succeed");
       }
     } catch (SnapshotException se) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           StringUtils.toLowerCase(se.getMessage()).contains(errMsg));
     }
   }
@@ -134,7 +134,7 @@
     FSDirectory fsdir = mock(FSDirectory.class);
     SnapshotManager snapshotManager = new SnapshotManager(new Configuration(),
         fsdir);
-    Assert.assertTrue(snapshotManager.
+    Assertions.assertTrue(snapshotManager.
         getMaxSnapshotID() < Snapshot.CURRENT_STATE_ID);
   }
 
@@ -171,10 +171,10 @@
           getSnapshotManager();
 
       // make sure edits of all previous 5 create snapshots are replayed
-      Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+      Assertions.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
 
       // make sure namenode has the new snapshot limit configured as 2
-      Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+      Assertions.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
 
       // Any new snapshot creation should still fail
       LambdaTestUtils.intercept(SnapshotException.class,
@@ -186,10 +186,10 @@
       snapshotManager = cluster.getNamesystem().
           getSnapshotManager();
       // make sure edits of all previous 5 create snapshots are replayed
-      Assert.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
+      Assertions.assertEquals(numSnapshots, snapshotManager.getNumSnapshots());
 
       // make sure namenode has the new snapshot limit configured as 2
-      Assert.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
+      Assertions.assertEquals(2, snapshotManager.getMaxSnapshotLimit());
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
index f64caff..e519294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
@@ -20,7 +20,7 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -28,9 +28,9 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test the snapshot-related metrics
@@ -52,7 +52,7 @@
   private MiniDFSCluster cluster;
   private DistributedFileSystem hdfs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -65,7 +65,7 @@
     DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
index c746663..0e76382 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
@@ -24,9 +24,9 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ipc.RemoteException;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestSnapshotNameWithInvalidCharacters {
   private static final long SEED = 0;
@@ -42,7 +42,7 @@
   private final String snapshot1 = "a:b:c";
   private final String snapshot2 = "a/b/c";
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
                                               .build();
@@ -50,7 +50,7 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
index 818f56a..94179ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -43,10 +40,10 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 
 /**
@@ -68,7 +65,7 @@
   DistributedFileSystem hdfs;
   FSDirectory fsdir;
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
@@ -79,7 +76,7 @@
     fsdir = fsn.getFSDirectory();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index 20cb270..4554352 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -35,9 +35,9 @@
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This class tests the replication handling/calculation of snapshots to make
@@ -61,7 +61,7 @@
   DistributedFileSystem hdfs;
   FSDirectory fsdir;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUMDATANODE)
@@ -72,7 +72,7 @@
     fsdir = fsn.getFSDirectory();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotStatsMXBean.java
index e65084f..a03e28e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotStatsMXBean.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.lang.management.ManagementFactory;
 import java.lang.reflect.Array;
@@ -31,7 +31,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestSnapshotStatsMXBean {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
index 5611eb9..b5e3647 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
@@ -19,8 +19,8 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options.Rename;
@@ -32,9 +32,9 @@
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestSnapshottableDirListing {
 
@@ -51,7 +51,7 @@
   FSNamesystem fsn;
   DistributedFileSystem hdfs;
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
@@ -63,7 +63,7 @@
     hdfs.mkdirs(dir2);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java
index c8955fa..f7f7bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java
@@ -32,8 +32,9 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
+import org.junit.jupiter.api.Test;
+
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import org.junit.Test;
 
 public class TestUpdatePipelineWithSnapshots {
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
index 2c93e12..6409a64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.util.EnumSet;
 import java.util.Map;
@@ -40,12 +40,12 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.ExpectedException;
 
 /**
@@ -70,14 +70,14 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
     initCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     IOUtils.cleanupWithLogger(null, hdfs);
     if (cluster != null) {
@@ -85,7 +85,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() {
     ++pathCount;
     path = new Path("/p" + pathCount);
@@ -186,14 +186,14 @@
 
     // Both original and snapshot have same XAttrs.
     Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     // Original XAttrs have changed, but snapshot still has old XAttrs.
     hdfs.setXAttr(path, name1, newValue1);
@@ -208,14 +208,14 @@
   private static void doSnapshotRootChangeAssertions(Path path,
       Path snapshotPath) throws Exception {
     Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
   }
 
   /**
@@ -233,14 +233,14 @@
 
     // Both original and snapshot have same XAttrs.
     Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(xattrs.size(), 2);
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     // Original XAttrs have been removed, but snapshot still has old XAttrs.
     hdfs.removeXAttr(path, name1);
@@ -256,12 +256,12 @@
   private static void doSnapshotRootRemovalAssertions(Path path,
       Path snapshotPath) throws Exception {
     Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
-    Assert.assertEquals(0, xattrs.size());
+    Assertions.assertEquals(0, xattrs.size());
 
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(2, xattrs.size());
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(2, xattrs.size());
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
   }
 
   /**
@@ -276,45 +276,45 @@
     hdfs.setXAttr(path, name1, value1);
     SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
     Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(1, xattrs.size());
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(1, xattrs.size());
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
 
     // Second snapshot
     hdfs.setXAttr(path, name1, newValue1);
     hdfs.setXAttr(path, name2, value2);
     SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName2);
     xattrs = hdfs.getXAttrs(snapshotPath2);
-    Assert.assertEquals(2, xattrs.size());
-    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(2, xattrs.size());
+    Assertions.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     // Third snapshot
     hdfs.setXAttr(path, name1, value1);
     hdfs.removeXAttr(path, name2);
     SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName3);
     xattrs = hdfs.getXAttrs(snapshotPath3);
-    Assert.assertEquals(1, xattrs.size());
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(1, xattrs.size());
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
 
     // Check that the first and second snapshots'
     // XAttrs have stayed constant
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(1, xattrs.size());
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(1, xattrs.size());
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
     xattrs = hdfs.getXAttrs(snapshotPath2);
-    Assert.assertEquals(2, xattrs.size());
-    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    Assertions.assertEquals(2, xattrs.size());
+    Assertions.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assertions.assertArrayEquals(value2, xattrs.get(name2));
 
     // Remove the second snapshot and verify the first and
     // third snapshots' XAttrs have stayed constant
     hdfs.deleteSnapshot(path, snapshotName2);
     xattrs = hdfs.getXAttrs(snapshotPath);
-    Assert.assertEquals(1, xattrs.size());
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(1, xattrs.size());
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
     xattrs = hdfs.getXAttrs(snapshotPath3);
-    Assert.assertEquals(1, xattrs.size());
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    Assertions.assertEquals(1, xattrs.size());
+    Assertions.assertArrayEquals(value1, xattrs.get(name1));
 
     hdfs.deleteSnapshot(path, snapshotName);
     hdfs.deleteSnapshot(path, snapshotName3);
@@ -356,7 +356,7 @@
     String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
         snapshotCopy.toUri().toString() };
     int ret = ToolRunner.run(new FsShell(conf), argv);
-    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+    assertEquals(SUCCESS, ret, "cp -px is not working on a snapshot");
 
     Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
     assertArrayEquals(value1, xattrs.get(name1));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
index 4437086..f3e055d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
@@ -18,9 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.sps;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.HashMap;
 import java.util.HashSet;
@@ -35,9 +33,9 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.StorageTypeNodePair;
 import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 
 /**
@@ -50,7 +48,7 @@
   private BlockStorageMovementNeeded unsatisfiedStorageMovementFiles;
   private final int selfRetryTimeout = 500;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration config = new HdfsConfiguration();
     Context ctxt = Mockito.mock(ExternalSPSContext.class);
@@ -64,7 +62,7 @@
         unsatisfiedStorageMovementFiles, ctxt);
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (bsmAttemptedItems != null) {
       bsmAttemptedItems.stop();
@@ -108,8 +106,8 @@
     bsmAttemptedItems.add(0L, 0L, 0L, blocksMap, 0);
     bsmAttemptedItems.notifyReportedBlock(dnInfo, StorageType.ARCHIVE,
         block);
-    assertEquals("Failed to receive result!", 1,
-        bsmAttemptedItems.getMovementFinishedBlocksCount());
+    assertEquals(1, bsmAttemptedItems.getMovementFinishedBlocksCount(),
+        "Failed to receive result!");
   }
 
   /**
@@ -125,10 +123,10 @@
     Map<Block, Set<StorageTypeNodePair>> blocksMap = new HashMap<>();
     blocksMap.put(block, locs);
     bsmAttemptedItems.add(0L, 0L, 0L, blocksMap, 0);
-    assertEquals("Shouldn't receive result", 0,
-        bsmAttemptedItems.getMovementFinishedBlocksCount());
-    assertEquals("Item doesn't exist in the attempted list", 1,
-        bsmAttemptedItems.getAttemptedItemsCount());
+    assertEquals(0, bsmAttemptedItems.getMovementFinishedBlocksCount(),
+        "Shouldn't receive result");
+    assertEquals(1, bsmAttemptedItems.getAttemptedItemsCount(),
+        "Item doesn't exist in the attempted list");
   }
 
   /**
@@ -155,10 +153,10 @@
 
     // start block movement report monitor thread
     bsmAttemptedItems.start();
-    assertTrue("Failed to add to the retry list",
-        checkItemMovedForRetry(trackID, 5000));
-    assertEquals("Failed to remove from the attempted list", 0,
-        bsmAttemptedItems.getAttemptedItemsCount());
+    assertTrue(checkItemMovedForRetry(trackID, 5000),
+        "Failed to add to the retry list");
+    assertEquals(0, bsmAttemptedItems.getAttemptedItemsCount(),
+        "Failed to remove from the attempted list");
   }
 
   /**
@@ -185,10 +183,10 @@
     bsmAttemptedItems.blocksStorageMovementUnReportedItemsCheck();
     bsmAttemptedItems.blockStorageMovementReportedItemsCheck();
 
-    assertTrue("Failed to add to the retry list",
-        checkItemMovedForRetry(trackID, 5000));
-    assertEquals("Failed to remove from the attempted list", 0,
-        bsmAttemptedItems.getAttemptedItemsCount());
+    assertTrue(checkItemMovedForRetry(trackID, 5000),
+        "Failed to add to the retry list");
+    assertEquals(0, bsmAttemptedItems.getAttemptedItemsCount(),
+        "Failed to remove from the attempted list");
   }
 
   /**
@@ -209,11 +207,11 @@
     bsmAttemptedItems.add(trackID, trackID, 0L, blocksMap, 0);
     bsmAttemptedItems.notifyReportedBlock(dnInfo, StorageType.ARCHIVE,
         block);
-    assertFalse(
-        "Should not add in queue again if it is not there in"
-            + " storageMovementAttemptedItems",
-        checkItemMovedForRetry(trackID, 5000));
-    assertEquals("Failed to remove from the attempted list", 1,
-        bsmAttemptedItems.getAttemptedItemsCount());
+    assertFalse(
+        checkItemMovedForRetry(trackID, 5000),
+        "Should not add in queue again if it is not there in"
+            + " storageMovementAttemptedItems");
+    assertEquals(1, bsmAttemptedItems.getAttemptedItemsCount(),
+        "Failed to remove from the attempted list");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 664f459..86f914e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -46,9 +46,9 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,7 +83,7 @@
   /**
    * Initialize erasure coding policy.
    */
-  @Before
+  @BeforeEach
   public void init(){
     ecPolicy = getEcPolicy();
     dataBlocks = ecPolicy.getNumDataUnits();
@@ -165,7 +165,7 @@
           fileLen);
       for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
         for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.DISK, type);
+          Assertions.assertEquals(StorageType.DISK, type);
         }
       }
       StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
@@ -275,7 +275,7 @@
           fileLen);
       for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
         for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.DISK, type);
+          Assertions.assertEquals(StorageType.DISK, type);
         }
       }
       Thread.sleep(5000);
@@ -479,7 +479,7 @@
           fileLen);
       for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
         for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.DISK, type);
+          Assertions.assertEquals(StorageType.DISK, type);
         }
       }
       StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java
index 8c0aa0c..3ff7016 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java
@@ -21,7 +21,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*;
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Status.*;
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -34,14 +34,14 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestStartupProgress {
 
   private StartupProgress startupProgress;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     startupProgress = new StartupProgress();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgressMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgressMetrics.java
index 4fe3d15..527497b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgressMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgressMetrics.java
@@ -21,18 +21,18 @@
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*;
 import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType.*;
 import static org.apache.hadoop.test.MetricsAsserts.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestStartupProgressMetrics {
 
   private StartupProgress startupProgress;
   private StartupProgressMetrics metrics;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     mockMetricsSystem();
     startupProgress = new StartupProgress();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindow.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindow.java
index 804c641..95c161c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindow.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindow.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.window;
 
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 public class TestRollingWindow {
 
@@ -30,32 +30,32 @@
   public void testBasics() {
     RollingWindow window = new RollingWindow(WINDOW_LEN, BUCKET_CNT);
     long time = 1;
-    Assert.assertEquals("The initial sum of rolling window must be 0", 0,
-        window.getSum(time));
+    Assertions.assertEquals(0, window.getSum(time),
+        "The initial sum of rolling window must be 0");
     time = WINDOW_LEN + BUCKET_LEN * 3 / 2;
-    Assert.assertEquals("The initial sum of rolling window must be 0", 0,
-        window.getSum(time));
+    Assertions.assertEquals(0, window.getSum(time),
+        "The initial sum of rolling window must be 0");
 
     window.incAt(time, 5);
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect the recent update", 5,
-        window.getSum(time));
+    Assertions.assertEquals(5,
+        window.getSum(time),
+        "The sum of rolling window does not reflect the recent update");
 
     time += BUCKET_LEN;
     window.incAt(time, 6);
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect the recent update", 11,
-        window.getSum(time));
+    Assertions.assertEquals(11,
+        window.getSum(time),
+        "The sum of rolling window does not reflect the recent update");
 
     time += WINDOW_LEN - BUCKET_LEN;
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect rolling effect", 6,
-        window.getSum(time));
+    Assertions.assertEquals(6,
+        window.getSum(time),
+        "The sum of rolling window does not reflect rolling effect");
 
     time += BUCKET_LEN;
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect rolling effect", 0,
-        window.getSum(time));
+    Assertions.assertEquals(0,
+        window.getSum(time),
+        "The sum of rolling window does not reflect rolling effect");
   }
 
   @Test
@@ -65,20 +65,20 @@
     window.incAt(time, 5);
 
     time++;
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect the recent update", 5,
-        window.getSum(time));
+    Assertions.assertEquals(5,
+        window.getSum(time),
+        "The sum of rolling window does not reflect the recent update");
 
     long reorderedTime = time - 2 * BUCKET_LEN;
     window.incAt(reorderedTime, 6);
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect the reordered update", 11,
-        window.getSum(time));
+    Assertions.assertEquals(11,
+        window.getSum(time),
+        "The sum of rolling window does not reflect the reordered update");
 
     time = reorderedTime + WINDOW_LEN;
-    Assert.assertEquals(
-        "The sum of rolling window does not reflect rolling effect", 5,
-        window.getSum(time));
+    Assertions.assertEquals(5,
+        window.getSum(time),
+        "The sum of rolling window does not reflect rolling effect");
   }
 
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
index f025531..df1173f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
@@ -26,14 +26,14 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
 import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class TestRollingWindowManager {
 
@@ -47,7 +47,7 @@
   final int N_TOP_USERS = 10;
   final int BUCKET_LEN = WINDOW_LEN_MS / BUCKET_CNT;
 
-  @Before
+  @BeforeEach
   public void init() {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, BUCKET_CNT);
@@ -70,40 +70,40 @@
     time++;
     TopWindow tops = manager.snapshot(time);
 
-    assertEquals("Unexpected number of ops", 3, tops.getOps().size());
+    assertEquals(3, tops.getOps().size(), "Unexpected number of ops");
     assertEquals(TopConf.ALL_CMDS, tops.getOps().get(0).getOpType());
     for (Op op : tops.getOps()) {
       final List<User> topUsers = op.getTopUsers();
-      assertEquals("Unexpected number of users", N_TOP_USERS, topUsers.size());
+      assertEquals(N_TOP_USERS, topUsers.size(), "Unexpected number of users");
       if (op.getOpType().equals("open")) {
         for (int i = 0; i < topUsers.size(); i++) {
           User user = topUsers.get(i);
-          assertEquals("Unexpected count for user " + user.getUser(),
-              (users.length-i)*2, user.getCount());
+          assertEquals((users.length - i) * 2, user.getCount(),
+              "Unexpected count for user " + user.getUser());
         }
-        // Closed form of sum(range(2,42,2))
-        assertEquals("Unexpected total count for op", 
-            (2+(users.length*2))*(users.length/2),
-            op.getTotalCount());
+        // Closed form of sum(range(2,42,2))
+        assertEquals(
+            (2 + (users.length * 2)) * (users.length / 2),
+            op.getTotalCount(), "Unexpected total count for op");
       }
     }
 
     // move the window forward not to see the "open" results
     time += WINDOW_LEN_MS - 2;
     tops = manager.snapshot(time);
-    assertEquals("Unexpected number of ops", 2, tops.getOps().size());
+    assertEquals(2, tops.getOps().size(), "Unexpected number of ops");
     assertEquals(TopConf.ALL_CMDS, tops.getOps().get(0).getOpType());
     final Op op = tops.getOps().get(1);
-    assertEquals("Should only see close ops", "close", op.getOpType());
+    assertEquals("close", op.getOpType(), "Should only see close ops");
     final List<User> topUsers = op.getTopUsers();
     for (int i = 0; i < topUsers.size(); i++) {
       User user = topUsers.get(i);
-      assertEquals("Unexpected count for user " + user.getUser(),
-          (users.length-i), user.getCount());
+      assertEquals((users.length - i), user.getCount(),
+          "Unexpected count for user " + user.getUser());
     }
-    // Closed form of sum(range(1,21))
-    assertEquals("Unexpected total count for op",
-        (1 + users.length) * (users.length / 2), op.getTotalCount());
+    // Closed form of sum(range(1,21))
+    assertEquals((1 + users.length) * (users.length / 2),
+        op.getTotalCount(), "Unexpected total count for op");
   }
 
   @Test
@@ -207,15 +207,15 @@
     rollingWindowManager.recordMetric(0, "op3", "user8", 1);
 
     TopWindow window = rollingWindowManager.snapshot(0);
-    Assert.assertEquals(numOps + 1, window.getOps().size());
+    Assertions.assertEquals(numOps + 1, window.getOps().size());
 
     Op allOp = window.getOps().get(0);
-    Assert.assertEquals(TopConf.ALL_CMDS, allOp.getOpType());
+    Assertions.assertEquals(TopConf.ALL_CMDS, allOp.getOpType());
     List<User> topUsers = allOp.getTopUsers();
-    Assert.assertEquals(numTopUsers * numOps, topUsers.size());
+    Assertions.assertEquals(numTopUsers * numOps, topUsers.size());
     // ensure all the top users for each op are present in the total op.
     for (int i = 1; i < numOps; i++) {
-      Assert.assertTrue(
+      Assertions.assertTrue(
           topUsers.containsAll(window.getOps().get(i).getTopUsers()));
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java
index 1621cd3..b6ff0f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java
@@ -28,10 +28,10 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -47,13 +47,13 @@
 
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void initializeMiniDFSCluster() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     this.cluster = new MiniDFSCluster.Builder(conf).build();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -79,12 +79,12 @@
       URL url = new URL(uri.toString());
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       conn.setRequestMethod("PUT");
-      Assert.assertEquals(expectedResponse, conn.getResponseCode());
+      Assertions.assertEquals(expectedResponse, conn.getResponseCode());
 
       NamenodeProtocols namenode = cluster.getNameNode().getRpcServer();
       FsPermission resultingPermission = namenode.getFileInfo(path).
             getPermission();
-      Assert.assertEquals(expectedPermission, resultingPermission.toString());
+      Assertions.assertEquals(expectedPermission, resultingPermission.toString());
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 6409945..952c866 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -45,9 +45,9 @@
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import org.slf4j.event.Level;
 
@@ -104,7 +104,7 @@
           final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
               namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null,
               LOCALHOST, null);
-          Assert.assertEquals(ipAddr, chosen.getIpAddr());
+          Assertions.assertEquals(ipAddr, chosen.getIpAddr());
         }
       }
   
@@ -118,9 +118,9 @@
       final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
           namenode, f, 0, 1);
       final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
-      Assert.assertEquals(1, lb.size());
+      Assertions.assertEquals(1, lb.size());
       final DatanodeInfo[] locations = lb.get(0).getLocations();
-      Assert.assertEquals(1, locations.length);
+      Assertions.assertEquals(1, locations.length);
       final DatanodeInfo expected = locations[0];
       
       //For GETFILECHECKSUM, OPEN and APPEND,
@@ -131,7 +131,7 @@
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
             namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null,
             LOCALHOST, status);
-        Assert.assertEquals(expected, chosen);
+        Assertions.assertEquals(expected, chosen);
       }
   
       { //test OPEN
@@ -139,7 +139,7 @@
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
             namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null,
             LOCALHOST, status);
-        Assert.assertEquals(expected, chosen);
+        Assertions.assertEquals(expected, chosen);
       }
 
       { //test APPEND
@@ -147,7 +147,7 @@
         final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
             namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null,
             LOCALHOST, status);
-        Assert.assertEquals(expected, chosen);
+        Assertions.assertEquals(expected, chosen);
       }
     } finally {
       cluster.shutdown();
@@ -188,9 +188,9 @@
       final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
           namenode, f, 0, 1);
       final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
-      Assert.assertEquals(1, lb.size());
+      Assertions.assertEquals(1, lb.size());
       final DatanodeInfo[] locations = lb.get(0).getLocations();
-      Assert.assertEquals(3, locations.length);
+      Assertions.assertEquals(3, locations.length);
       
       
       //For GETFILECHECKSUM, OPEN and APPEND,
@@ -205,7 +205,7 @@
               namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
               sb.toString(), LOCALHOST, status);
           for (int j = 0; j <= i; j++) {
-            Assert.assertNotEquals(locations[j].getHostName(),
+            Assertions.assertNotEquals(locations[j].getHostName(),
                 chosen.getHostName());
           }
         }
@@ -216,7 +216,7 @@
               namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString(),
               LOCALHOST, status);
           for (int j = 0; j <= i; j++) {
-            Assert.assertNotEquals(locations[j].getHostName(),
+            Assertions.assertNotEquals(locations[j].getHostName(),
                 chosen.getHostName());
           }
         }
@@ -227,7 +227,7 @@
               .chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
                   blocksize, sb.toString(), LOCALHOST, status);
           for (int j = 0; j <= i; j++) {
-            Assert.assertNotEquals(locations[j].getHostName(),
+            Assertions.assertNotEquals(locations[j].getHostName(),
                 chosen.getHostName());
           }
         }
@@ -256,7 +256,7 @@
           DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
           "DataNode2", LOCALHOST, null);
     } catch (Exception e) {
-      Assert.fail("Failed to exclude DataNode2" + e.getMessage());
+      Assertions.fail("Failed to exclude DataNode2" + e.getMessage());
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 77922a0..16df67e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -36,8 +36,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -90,11 +90,11 @@
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -128,7 +128,7 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(TestExternalStoragePolicySatisfier.class);
 
-  @Before
+  @BeforeEach
   public void setUp() {
     config = new HdfsConfiguration();
     config.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -141,7 +141,7 @@
         StoragePolicySatisfierMode.EXTERNAL.toString());
   }
 
-  @After
+  @AfterEach
   public void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -241,7 +241,7 @@
     baseDir = GenericTestUtils
         .getTestDir(TestExternalStoragePolicySatisfier.class.getSimpleName());
     FileUtil.fullyDelete(baseDir);
-    Assert.assertTrue(baseDir.mkdirs());
+    Assertions.assertTrue(baseDir.mkdirs());
 
     Properties kdcConf = MiniKdc.createConf();
     kdc = new MiniKdc(kdcConf, baseDir);
@@ -251,8 +251,8 @@
         UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
     KerberosName.resetDefaultRealm();
-    Assert.assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    Assertions.assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     keytabFile = new File(baseDir, username + ".keytab");
     String keytab = keytabFile.getAbsolutePath();
@@ -310,7 +310,7 @@
           // verify that sps runs Ok.
           testWhenStoragePolicySetToALLSSD();
           // verify that UGI was logged in using keytab.
-          Assert.assertTrue(UserGroupInformation.isLoginKeytabBased());
+          Assertions.assertTrue(UserGroupInformation.isLoginKeytabBased());
           return null;
         }
       });
@@ -351,7 +351,7 @@
       writeContent(fileExceeds);
       try {
         fs.satisfyStoragePolicy(new Path(fileExceeds));
-        Assert.fail("Should throw exception as it exceeds "
+        Assertions.fail("Should throw exception as it exceeds "
             + "outstanding SPS call Q limit");
       } catch (IOException ioe) {
         GenericTestUtils.assertExceptionContains(
@@ -377,8 +377,8 @@
           HdfsServerConstants.MOVER_ID_PATH, 0, (short) 1, 0);
       restartNamenode();
       boolean running = externalCtxt.isRunning();
-      Assert.assertTrue("SPS should be running as "
-          + "no Mover really running", running);
+      Assertions.assertTrue(running, "SPS should be running as "
+          + "no Mover really running");
     } finally {
       shutdownCluster();
     }
@@ -388,7 +388,7 @@
    * This test need not run as external scan is not a batch based scanning right
    * now.
    */
-  @Ignore("ExternalFileIdCollector is not batch based right now."
+  @Disabled("ExternalFileIdCollector is not batch based right now."
       + " So, ignoring it.")
   public void testBatchProcessingForSPSDirectory() throws Exception {
   }
@@ -396,7 +396,7 @@
   /**
    * This test case is more specific to internal.
    */
-  @Ignore("This test is specific to internal, so skipping here.")
+  @Disabled("This test is specific to internal, so skipping here.")
   public void testWhenMoverIsAlreadyRunningBeforeStoragePolicySatisfier()
       throws Exception {
   }
@@ -404,14 +404,14 @@
   /**
    * This test is specific to internal SPS. So, ignoring it.
    */
-  @Ignore("This test is specific to internal SPS. So, ignoring it.")
+  @Disabled("This test is specific to internal SPS. So, ignoring it.")
   public void testTraverseWhenParentDeleted() throws Exception {
   }
 
   /**
    * This test is specific to internal SPS. So, ignoring it.
    */
-  @Ignore("This test is specific to internal SPS. So, ignoring it.")
+  @Disabled("This test is specific to internal SPS. So, ignoring it.")
   public void testTraverseWhenRootParentDeleted() throws Exception {
   }
 
@@ -701,7 +701,7 @@
 
       try {
         hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
-        Assert.fail(String.format(
+        Assertions.fail(String.format(
             "Should failed to satisfy storage policy "
                 + "for %s since %s is set to false.",
             FILE, DFS_STORAGE_POLICY_ENABLED_KEY));
@@ -718,7 +718,7 @@
       hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(config), config);
       try {
         hdfsAdmin.satisfyStoragePolicy(new Path(nonExistingFile));
-        Assert.fail("Should throw FileNotFoundException for " +
+        Assertions.fail("Should throw FileNotFoundException for " +
             nonExistingFile);
       } catch (FileNotFoundException e) {
 
@@ -728,7 +728,7 @@
         hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
         hdfsAdmin.satisfyStoragePolicy(new Path(FILE));
       } catch (Exception e) {
-        Assert.fail(String.format("Allow to invoke mutlipe times "
+        Assertions.fail(String.format("Allow to invoke multiple times "
             + "#satisfyStoragePolicy() api for a path %s , internally just "
             + "skipping addtion to satisfy movement queue.", FILE));
       }
@@ -1159,7 +1159,7 @@
           client.getBlockLocations(testFile, 0, fileLen);
       for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
         for (StorageType type : lb.getStorageTypes()) {
-          Assert.assertEquals(StorageType.DISK, type);
+          Assertions.assertEquals(StorageType.DISK, type);
         }
       }
 
@@ -1190,12 +1190,12 @@
           .getEditLog();
       long lastWrittenTxId = editlog.getLastWrittenTxId();
       fs.satisfyStoragePolicy(filePath);
-      Assert.assertEquals("Xattr should not be added for the file",
-          lastWrittenTxId, editlog.getLastWrittenTxId());
+      Assertions.assertEquals(lastWrittenTxId, editlog.getLastWrittenTxId(),
+          "Xattr should not be added for the file");
       INode inode = hdfsCluster.getNameNode().getNamesystem().getFSDirectory()
           .getINode(filePath.toString());
-      Assert.assertTrue("XAttrFeature should be null for file",
-          inode.getXAttrFeature() == null);
+      Assertions.assertTrue(inode.getXAttrFeature() == null,
+          "XAttrFeature should be null for file");
     } finally {
       shutdownCluster();
     }
@@ -1285,8 +1285,8 @@
       fs.satisfyStoragePolicy(filePath);
       DFSTestUtil.waitExpectedStorageType(filePath.toString(),
           StorageType.ARCHIVE, 3, 60000, hdfsCluster.getFileSystem());
-      assertFalse("Log output does not contain expected log message: ",
-          logs.getOutput().contains("some of the blocks are low redundant"));
+      assertFalse(logs.getOutput().contains("some of the blocks are low redundant"),
+          "Log output does not contain expected log message: ");
     } finally {
       shutdownCluster();
     }
@@ -1558,20 +1558,20 @@
         DEFAULT_BLOCK_SIZE, (short) 3, 0, false, favoredNodes);
 
     LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
-    Assert.assertEquals("Wrong block count", 1,
-        locatedBlocks.locatedBlockCount());
+    Assertions.assertEquals(1, locatedBlocks.locatedBlockCount(),
+        "Wrong block count");
 
     // verify storage type before movement
     LocatedBlock lb = locatedBlocks.get(0);
     StorageType[] storageTypes = lb.getStorageTypes();
     for (StorageType storageType : storageTypes) {
-      Assert.assertTrue(StorageType.DISK == storageType);
+      Assertions.assertTrue(StorageType.DISK == storageType);
     }
 
     // Mock FsDatasetSpi#getPinning to show that the block is pinned.
     DatanodeInfo[] locations = lb.getLocations();
-    Assert.assertEquals(3, locations.length);
-    Assert.assertTrue(favoredNodesCount < locations.length);
+    Assertions.assertEquals(3, locations.length);
+    Assertions.assertTrue(favoredNodesCount < locations.length);
     for(DatanodeInfo dnInfo: locations){
       LOG.info("Simulate block pinning in datanode {}",
           locations[favoredNodesCount]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index aac1bf5..9e49a87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -21,8 +21,6 @@
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -82,9 +80,8 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Assume;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -189,7 +186,7 @@
           new ShortCircuitReplicaCreator() {
         @Override
         public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-          Assert.fail("expected to use existing entry.");
+          Assertions.fail("expected to use existing entry.");
           return null;
         }
       });
@@ -209,7 +206,7 @@
           new ExtendedBlockId(123, "test_bp1"), new ShortCircuitReplicaCreator() {
         @Override
         public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-          Assert.fail("expected to use existing entry.");
+          Assertions.fail("expected to use existing entry.");
           return null;
         }
       });
@@ -291,7 +288,7 @@
             new ShortCircuitReplicaCreator() {
         @Override
         public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-          Assert.fail("expected to use existing entry for " + iVal);
+          Assertions.fail("expected to use existing entry for " + iVal);
           return null;
         }
       });
@@ -312,7 +309,7 @@
         }
       });
     Preconditions.checkState(replicaInfos[0].getReplica() == null);
-    Assert.assertTrue(calledCreate.isTrue());
+    Assertions.assertTrue(calledCreate.isTrue());
     // Clean up
     for (int i = 1; i < pairs.length; i++) {
       replicaInfos[i].getReplica().unref();
@@ -385,7 +382,7 @@
         new ExtendedBlockId(1, "test_bp1"), new ShortCircuitReplicaCreator() {
       @Override
       public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-        Assert.fail("second replica went stale, despite 1 " +
+        Assertions.fail("second replica went stale, despite 1 " +
             "hour staleness time.");
         return null;
       }
@@ -412,7 +409,7 @@
     conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DomainSocket.disableBindPathValidation();
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assertions.assertNull(DomainSocket.getLoadingFailureReason());
     return conf;
   }
   
@@ -439,7 +436,7 @@
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
         // The ClientShmManager starts off empty
-        Assert.assertEquals(0,  info.size());
+        Assertions.assertEquals(0,  info.size());
       }
     });
     DomainPeer peer = getDomainPeerToDn(conf);
@@ -451,18 +448,18 @@
     // Allocating the first shm slot requires using up a peer.
     Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
                     blockId, "testAllocShm_client");
-    Assert.assertNotNull(slot);
-    Assert.assertTrue(usedPeer.booleanValue());
+    Assertions.assertNotNull(slot);
+    Assertions.assertTrue(usedPeer.booleanValue());
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
         // The ClientShmManager starts off empty
-        Assert.assertEquals(1,  info.size());
+        Assertions.assertEquals(1,  info.size());
         PerDatanodeVisitorInfo vinfo = info.get(datanode);
-        Assert.assertFalse(vinfo.disabled);
-        Assert.assertEquals(0, vinfo.full.size());
-        Assert.assertEquals(1, vinfo.notFull.size());
+        Assertions.assertFalse(vinfo.disabled);
+        Assertions.assertEquals(0, vinfo.full.size());
+        Assertions.assertEquals(1, vinfo.notFull.size());
       }
     });
     cache.scheduleSlotReleaser(slot);
@@ -513,7 +510,7 @@
     int first = fis.read();
     final ExtendedBlock block =
         DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
-    Assert.assertTrue(first != -1);
+    Assertions.assertTrue(first != -1);
     cache.accept(new CacheVisitor() {
       @Override
       public void visit(int numOutstandingMmaps,
@@ -523,8 +520,8 @@
           LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             ExtendedBlockId.fromExtendedBlock(block));
-        Assert.assertNotNull(replica);
-        Assert.assertTrue(replica.getSlot().isValid());
+        Assertions.assertNotNull(replica);
+        Assertions.assertTrue(replica.getSlot().isValid());
       }
     });
     // Stop the Namenode.  This will close the socket keeping the client's
@@ -539,8 +536,8 @@
           LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             ExtendedBlockId.fromExtendedBlock(block));
-        Assert.assertNotNull(replica);
-        Assert.assertFalse(replica.getSlot().isValid());
+        Assertions.assertNotNull(replica);
+        Assertions.assertFalse(replica.getSlot().isValid());
       }
     });
     cluster.shutdown();
@@ -573,7 +570,7 @@
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
         // The ClientShmManager starts off empty.
-        Assert.assertEquals(0,  info.size());
+        Assertions.assertEquals(0,  info.size());
       }
     });
     final Path TEST_PATH = new Path("/test_file");
@@ -584,7 +581,7 @@
     byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH);
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    Assertions.assertTrue(Arrays.equals(contents, expected));
     // Loading this file brought the ShortCircuitReplica into our local
     // replica cache.
     final DatanodeInfo datanode = new DatanodeInfoBuilder()
@@ -594,12 +591,12 @@
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
-        Assert.assertTrue(info.get(datanode).full.isEmpty());
-        Assert.assertFalse(info.get(datanode).disabled);
-        Assert.assertEquals(1, info.get(datanode).notFull.values().size());
+        Assertions.assertTrue(info.get(datanode).full.isEmpty());
+        Assertions.assertFalse(info.get(datanode).disabled);
+        Assertions.assertEquals(1, info.get(datanode).notFull.values().size());
         DfsClientShm shm =
             info.get(datanode).notFull.values().iterator().next();
-        Assert.assertFalse(shm.isDisconnected());
+        Assertions.assertFalse(shm.isDisconnected());
       }
     });
     // Remove the file whose blocks we just read.
@@ -616,9 +613,9 @@
             @Override
             public void visit(HashMap<DatanodeInfo,
                   PerDatanodeVisitorInfo> info) throws IOException {
-              Assert.assertTrue(info.get(datanode).full.isEmpty());
-              Assert.assertFalse(info.get(datanode).disabled);
-              Assert.assertEquals(1,
+              Assertions.assertTrue(info.get(datanode).full.isEmpty());
+              Assertions.assertFalse(info.get(datanode).disabled);
+              Assertions.assertEquals(1,
                   info.get(datanode).notFull.values().size());
               DfsClientShm shm = info.get(datanode).notFull.values().
                   iterator().next();
@@ -745,7 +742,7 @@
       // The shared memory segment allocation will fail because of the failure
       // injector.
       DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
-      Assert.fail("expected readFileBuffer to fail, but it succeeded.");
+      Assertions.fail("expected readFileBuffer to fail, but it succeeded.");
     } catch (Throwable t) {
       GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
           "testing, but we failed to do a non-TCP read.", t);
@@ -898,7 +895,7 @@
       DatanodeInfo[] nodes = blk.getLocations();
 
       try {
-        Assert.assertNull(new BlockReaderFactory(new DfsClientConf(conf))
+        Assertions.assertNull(new BlockReaderFactory(new DfsClientConf(conf))
             .setInetSocketAddress(NetUtils.createSocketAddr(nodes[0]
                 .getXferAddr()))
             .setClientCacheContext(clientContext)
@@ -907,7 +904,7 @@
             .setBlockToken(new Token())
             .createShortCircuitReplicaInfo());
       } catch (NullPointerException ex) {
-        Assert.fail("Should not throw NPE when the native library is unable " +
+        Assertions.fail("Should not throw NPE when the native library is unable " +
             "to create new files!");
       }
     }
@@ -949,9 +946,9 @@
       Thread.sleep(2000);
       cache.scheduleSlotReleaser(slot2);
       Thread.sleep(2000);
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           cluster.getDataNodes().get(0).getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
+      Assertions.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
     } finally {
       cluster.shutdown();
     }
@@ -995,9 +992,9 @@
       }
       cache.scheduleSlotReleaser(slot2);
       Thread.sleep(2000);
-      Assert.assertEquals(0,
+      Assertions.assertEquals(0,
           cluster.getDataNodes().get(0).getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
+      Assertions.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index f2ee48c..5698e1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs.shortcircuit;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.EOFException;
 import java.io.File;
@@ -61,12 +60,11 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test for short circuit read functionality using {@link BlockReaderLocal}.
@@ -78,20 +76,20 @@
 public class TestShortCircuitLocalRead {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     sockDir.close();
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assertions.assertNull(DomainSocket.getLoadingFailureReason());
   }
 
   static final long seed = 0xDEADBEEFL;
@@ -115,7 +113,7 @@
       int len, String message) {
     for (int idx = 0; idx < len; idx++) {
       if (expected[from + idx] != actual[idx]) {
-        Assert.fail(message + " byte " + (from + idx) + " differs. expected " +
+        Assertions.fail(message + " byte " + (from + idx) + " differs. expected " +
             expected[from + idx] + " actual " + actual[idx] +
             "\nexpected: " +
             StringUtils.byteToHexString(expected, from, from + len) +
@@ -275,8 +273,8 @@
     try {
       // check that / exists
       Path path = new Path("/");
-      assertTrue("/ should be a directory",
-          fs.getFileStatus(path).isDirectory());
+      assertTrue(fs.getFileStatus(path).isDirectory(),
+          "/ should be a directory");
 
       byte[] fileData = AppendTestUtil.randomBytes(seed, size);
       Path file1 = fs.makeQualified(new Path("filelocal.dat"));
@@ -374,11 +372,11 @@
               dnInfo, conf, 60000, false);
       try {
         proxy.getBlockLocalPathInfo(blk, token);
-        Assert.fail("The call should have failed as this user "
+        Assertions.fail("The call should have failed as this user "
             + " is not configured in "
             + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
       } catch (IOException ex) {
-        Assert.assertTrue(ex.getMessage().contains(
+        Assertions.assertTrue(ex.getMessage().contains(
             "not configured in "
             + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
       }
@@ -405,8 +403,8 @@
     try {
       // check that / exists
       Path path = new Path("/");
-      assertTrue("/ should be a directory",
-          fs.getFileStatus(path).isDirectory());
+      assertTrue(fs.getFileStatus(path).isDirectory(),
+          "/ should be a directory");
 
       byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
       // create a new file in home directory. Do not close it.
@@ -467,10 +465,10 @@
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
@@ -489,7 +487,7 @@
         byte buf[] = new byte[100];
         fsIn.seek(2000);
         fsIn.readFully(buf, 0, buf.length);
-        Assert.fail("shouldn't be able to read from corrupt 0-length " +
+        Assertions.fail("shouldn't be able to read from corrupt 0-length " +
             "block file.");
       } catch (IOException e) {
         DFSClient.LOG.error("caught exception ", e);
@@ -610,8 +608,8 @@
     // check that / exists
     Path path = new Path("/");
     URI uri = cluster.getURI();
-    assertTrue(
-        "/ should be a directory", fs.getFileStatus(path).isDirectory());
+    assertTrue(fs.getFileStatus(path).isDirectory(),
+        "/ should be a directory");
 
     byte[] fileData = AppendTestUtil.randomBytes(seed, size);
     Path file1 = new Path("filelocal.dat");
@@ -622,10 +620,10 @@
     try {
       checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
           conf, shortCircuitFails);
-      //BlockReaderRemote2 have unsupported method read(ByteBuffer bf)
-      assertFalse(
-          "BlockReaderRemote2 unsupported method read(ByteBuffer bf) error",
-          checkUnsupportedMethod(fs, file1, fileData, readOffset));
+      //BlockReaderRemote2 have unsupported method read(ByteBuffer bf)
+      assertFalse(
+          checkUnsupportedMethod(fs, file1, fileData, readOffset),
+          "BlockReaderRemote2 unsupported method read(ByteBuffer bf) error");
     } catch(IOException e) {
       throw new IOException(
           "doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java
index f99ef01..cccf3e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test class to test Admin Helper.
@@ -28,11 +28,11 @@
   @Test
   public void prettifyExceptionWithNpe() {
     String pretty = AdminHelper.prettifyException(new NullPointerException());
-    Assert.assertTrue(
-        "Prettified exception message doesn't contain the required exception "
-            + "message",
-        pretty.startsWith("NullPointerException at org.apache.hadoop.hdfs.tools"
-            + ".TestAdminHelper.prettifyExceptionWithNpe"));
+    Assertions.assertTrue(
+        pretty.startsWith("NullPointerException at org.apache.hadoop.hdfs.tools"
+            + ".TestAdminHelper.prettifyExceptionWithNpe"),
+        "Prettified exception message doesn't contain the required exception "
+            + "message");
   }
 
   @Test
@@ -42,7 +42,7 @@
         new IllegalArgumentException("Something is wrong",
             new IllegalArgumentException("Something is illegal")));
 
-    Assert.assertEquals(
+    Assertions.assertEquals(
         "IllegalArgumentException: Something is wrong",
         pretty);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index e9e1cad..d7c18c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -81,10 +81,10 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
-import org.junit.Assert;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -93,12 +93,8 @@
 import static org.hamcrest.CoreMatchers.anyOf;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
@@ -121,7 +117,7 @@
   private String tempResource = null;
   private static final int NUM_DATANODES = 2;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
@@ -145,7 +141,7 @@
     err.reset();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     try {
       System.out.flush();
@@ -244,11 +240,11 @@
       /* collect outputs */
       final List<String> outs = Lists.newArrayList();
       scanIntoList(out, outs);
-      /* verify results */
-      assertEquals(
-          "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
-              + " Config version: core-x.y.z,hdfs-x",
-          1, outs.size());
+      /* verify results */
+      assertEquals(
+          1, outs.size(),
+          "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
+              + " Config version: core-x.y.z,hdfs-x");
       assertThat(outs.get(0),
           is(allOf(containsString("Uptime:"),
               containsString("Software version"),
@@ -322,9 +318,9 @@
       assertEquals(-1, ret);
 
       scanIntoList(out, outs);
-      assertTrue("Unexpected " + command + " stdout: " + out, outs.isEmpty());
-      assertTrue("Unexpected " + command + " stderr: " + err,
-          err.toString().contains("Exception"));
+      assertTrue(outs.isEmpty(), "Unexpected " + command + " stdout: " + out);
+      assertTrue(err.toString().contains("Exception"),
+          "Unexpected " + command + " stderr: " + err);
     }
   }
 
@@ -493,11 +489,11 @@
 
       /* verify results */
       assertEquals(0, ret);
-      assertEquals(
-          "There should be three lines per Datanode: the 1st line is"
-              + " rack info, 2nd node info, 3rd empty line. The total"
-              + " should be as a result of 3 * numDn.",
-          12, outs.size());
+      assertEquals(
+          12, outs.size(),
+          "There should be three lines per Datanode: the 1st line is"
+              + " rack info, 2nd node info, 3rd empty line. The total"
+              + " should be as a result of 3 * numDn.");
       assertThat(outs.get(0),
           is(allOf(containsString("Rack:"), containsString("/d1/r1"))));
       assertThat(outs.get(3),
@@ -531,21 +527,21 @@
     final List<String> errs = Lists.newArrayList();
     awaitReconfigurationFinished("namenode", address, outs, errs);
 
-    // verify change
-    assertEquals(
-        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        6,
-        namenode
-          .getConf()
-          .getLong(DFS_HEARTBEAT_INTERVAL_KEY,
-                DFS_HEARTBEAT_INTERVAL_DEFAULT));
-    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        6,
-        namenode
-          .getNamesystem()
-          .getBlockManager()
-          .getDatanodeManager()
-          .getHeartbeatInterval());
+    // verify change
+    assertEquals(
+        6,
+        namenode
+            .getConf()
+            .getLong(DFS_HEARTBEAT_INTERVAL_KEY,
+                DFS_HEARTBEAT_INTERVAL_DEFAULT),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
+    assertEquals(6,
+        namenode
+            .getNamesystem()
+            .getBlockManager()
+            .getDatanodeManager()
+            .getHeartbeatInterval(),
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
 
     int offset = 1;
     assertThat(outs.get(offset), containsString("SUCCESS: Changed property "
@@ -631,8 +627,8 @@
       LocatedBlocks lbs = miniCluster.getFileSystem().getClient().
           getNamenode().getBlockLocations(
           file.toString(), 0, fileLength);
-      assertTrue("Unexpected block type: " + lbs.get(0),
-          lbs.get(0) instanceof LocatedBlock);
+      assertTrue(lbs.get(0) instanceof LocatedBlock,
+          "Unexpected block type: " + lbs.get(0));
       LocatedBlock locatedBlock = lbs.get(0);
       DatanodeInfo locatedDataNode = locatedBlock.getLocations()[0];
       LOG.info("Replica block located on: " + locatedDataNode);
@@ -665,8 +661,8 @@
           break;
         }
       }
-      assertTrue("Unable to choose a DataNode to shutdown!",
-          dataNodeToShutdown != null);
+      assertTrue(dataNodeToShutdown != null,
+          "Unable to choose a DataNode to shutdown!");
 
       // Shut down the DataNode not hosting the replicated block
       LOG.info("Shutting down: " + dataNodeToShutdown);
@@ -680,8 +676,8 @@
       // Corrupt the replicated block
       final int blockFilesCorrupted = miniCluster
           .corruptBlockOnDataNodes(block);
-      assertEquals("Fail to corrupt all replicas for block " + block,
-          replFactor, blockFilesCorrupted);
+      assertEquals(replFactor, blockFilesCorrupted,
+          "Fail to corrupt all replicas for block " + block);
 
       try {
         IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
@@ -708,8 +704,8 @@
       lbs = miniCluster.getFileSystem().getClient().
           getNamenode().getBlockLocations(
           ecFile.toString(), 0, blockGroupSize);
-      assertTrue("Unexpected block type: " + lbs.get(0),
-          lbs.get(0) instanceof LocatedStripedBlock);
+      assertTrue(lbs.get(0) instanceof LocatedStripedBlock,
+          "Unexpected block type: " + lbs.get(0));
       LocatedStripedBlock bg =
           (LocatedStripedBlock)(lbs.get(0));
 
@@ -1038,23 +1034,23 @@
     assertEquals(0, ToolRunner.run(dfsAdmin,
         new String[]{"-setBalancerBandwidth", "10000"}));
     outStr = scanIntoString(out);
-    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
-        "bandwidth is set to 10000"));
+    assertTrue(outStr.contains("Balancer " +
+        "bandwidth is set to 10000"), "Did not set bandwidth!");
 
     // Test parsing with units
     resetStream();
     assertEquals(0, ToolRunner.run(dfsAdmin,
         new String[]{"-setBalancerBandwidth", "10m"}));
     outStr = scanIntoString(out);
-    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
-        "bandwidth is set to 10485760"));
+    assertTrue(outStr.contains("Balancer " +
+        "bandwidth is set to 10485760"), "Did not set bandwidth!");
 
     resetStream();
     assertEquals(0, ToolRunner.run(dfsAdmin,
         new String[]{"-setBalancerBandwidth", "10k"}));
     outStr = scanIntoString(out);
-    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
-        "bandwidth is set to 10240"));
+    assertTrue(outStr.contains("Balancer " +
+        "bandwidth is set to 10240"), "Did not set bandwidth!");
 
     // Test negative numbers
     assertEquals(-1, ToolRunner.run(dfsAdmin,
@@ -1128,9 +1124,9 @@
         }
       });
     } catch (RemoteException re) {
-      Assert.assertTrue(re.unwrapRemoteException()
+      Assertions.assertTrue(re.unwrapRemoteException()
           instanceof AccessControlException);
-      Assert.assertTrue(re.unwrapRemoteException().getMessage()
+      Assertions.assertTrue(re.unwrapRemoteException().getMessage()
           .equals("User: " + realUser +
               " is not allowed to impersonate " + proxyUser));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 6b8657c..16dc1aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -34,13 +34,10 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 public class TestDFSAdminWithHA {
 
@@ -111,7 +108,7 @@
     conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 0);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     try {
       System.out.flush();
@@ -135,25 +132,25 @@
     setUpHaCluster(false);
     // Enter safemode
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     // Get safemode
     exitCode = admin.run(new String[] {"-safemode", "get"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     // Leave safemode
     exitCode = admin.run(new String[] {"-safemode", "leave"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Safe mode is OFF in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     // Get safemode
     exitCode = admin.run(new String[] {"-safemode", "get"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Safe mode is OFF in.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -163,12 +160,12 @@
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Save namespace successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -178,14 +175,14 @@
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     cluster.getDfsCluster().shutdownNameNode(1);
 //
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "Save namespace successful for.*" + newLine
         + "Save namespace failed for.*" + newLine;
     assertOutputMatches(message);
@@ -196,14 +193,14 @@
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     cluster.getDfsCluster().shutdownNameNode(0);
 
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "Save namespace failed for.*" + newLine
         + "Save namespace successful for.*" + newLine;
     assertOutputMatches(message);
@@ -214,7 +211,7 @@
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
@@ -222,7 +219,7 @@
     cluster.getDfsCluster().shutdownNameNode(1);
 
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "Save namespace failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -231,18 +228,18 @@
   public void testRestoreFailedStorage() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "restoreFailedStorage is set to false for.*";
     // Default is false
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage is set to true for.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage is set to false for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -252,20 +249,20 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "restoreFailedStorage is set to false for.*" + newLine
         + "restoreFailedStorage failed for.*" + newLine;
     // Default is false
     assertOutputMatches(message);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage is set to true for.*" + newLine
         + "restoreFailedStorage failed for.*" + newLine;
     assertOutputMatches(message);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage is set to false for.*" + newLine
         + "restoreFailedStorage failed for.*" + newLine;
     assertOutputMatches(message);
@@ -276,20 +273,20 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "restoreFailedStorage failed for.*" + newLine
         + "restoreFailedStorage is set to false for.*" + newLine;
     // Default is false
     assertOutputMatches(message);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage failed for.*" + newLine
         + "restoreFailedStorage is set to true for.*" + newLine;
     assertOutputMatches(message);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage failed for.*" + newLine
         + "restoreFailedStorage is set to false for.*" + newLine;
     assertOutputMatches(message);
@@ -301,18 +298,18 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "restoreFailedStorage failed for.*";
     // Default is false
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -321,7 +318,7 @@
   public void testRefreshNodes() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh nodes successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -331,7 +328,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh nodes successful for.*" + newLine
         + "Refresh nodes failed for.*" + newLine;
     assertOutputMatches(message);
@@ -342,7 +339,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh nodes failed for.*" + newLine
         + "Refresh nodes successful for.*" + newLine;
     assertOutputMatches(message);
@@ -354,7 +351,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh nodes failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -365,7 +362,7 @@
     cluster.getDfsCluster().transitionToActive(0);
 
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set to 10";
     assertOutputMatches(message + newLine);
   }
@@ -376,7 +373,7 @@
     cluster.getDfsCluster().shutdownNameNode(1);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set to 10";
     assertOutputMatches(message + newLine);
   }
@@ -387,7 +384,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().transitionToActive(1);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set to 10";
     assertOutputMatches(message + newLine);
   }
@@ -398,7 +395,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set failed." + newLine
         + ".*" + newLine;
     assertOutputMatches(message);
@@ -408,7 +405,7 @@
   public void testSetNegativeBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "-10"});
-    assertEquals("Negative bandwidth value must fail the command", -1, exitCode);
+    assertEquals(-1, exitCode, "Negative bandwidth value must fail the command");
   }
 
   @Test (timeout = 30000)
@@ -416,7 +413,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String messageFromActiveNN = "Created metasave file dfs.meta "
         + "in the log directory of namenode.*";
     String messageFromStandbyNN = "Skip Standby NameNode, since it "
@@ -431,7 +428,7 @@
     cluster.getDfsCluster().transitionToActive(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Created metasave file dfs.meta in the log directory"
         + " of namenode.*" + newLine
         + "Created metasave file dfs.meta in the log directory"
@@ -445,7 +442,7 @@
     cluster.getDfsCluster().transitionToActive(1);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Created metasave file dfs.meta in the log directory"
         + " of namenode.*failed" + newLine
         + "Created metasave file dfs.meta in the log directory"
@@ -459,7 +456,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Created metasave file dfs.meta in the log directory"
         + " of namenode.*failed";
     assertOutputMatches(message + newLine + message + newLine);
@@ -469,7 +466,7 @@
   public void testRefreshServiceAcl() throws Exception {
     setUpHaCluster(true);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh service acl successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -479,7 +476,7 @@
     setUpHaCluster(true);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh service acl successful for.*" + newLine
         + "Refresh service acl failed for.*" + newLine;
     assertOutputMatches(message);
@@ -490,7 +487,7 @@
     setUpHaCluster(true);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh service acl failed for.*" + newLine
         + "Refresh service acl successful for.*" + newLine;
     assertOutputMatches(message);
@@ -502,7 +499,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh service acl failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -512,7 +509,7 @@
   public void testRefreshUserToGroupsMappings() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh user to groups mapping successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -522,7 +519,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh user to groups mapping successful for.*"
         + newLine
         + "Refresh user to groups mapping failed for.*"
@@ -535,7 +532,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh user to groups mapping failed for.*"
         + newLine
         + "Refresh user to groups mapping successful for.*"
@@ -549,7 +546,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh user to groups mapping failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -559,7 +556,7 @@
     setUpHaCluster(false);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh super user groups configuration successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -571,7 +568,7 @@
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh super user groups configuration successful for.*"
         + newLine
         + "Refresh super user groups configuration failed for.*"
@@ -586,7 +583,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh super user groups configuration failed for.*"
         + newLine
         + "Refresh super user groups configuration successful for.*"
@@ -602,7 +599,7 @@
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh super user groups configuration failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -611,7 +608,7 @@
   public void testRefreshCallQueue() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh call queue successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -621,7 +618,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh call queue successful for.*" + newLine
         + "Refresh call queue failed for.*" + newLine;
     assertOutputMatches(message);
@@ -632,7 +629,7 @@
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh call queue failed for.*" + newLine
         + "Refresh call queue successful for.*" + newLine;
     assertOutputMatches(message);
@@ -644,7 +641,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh call queue failed for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -653,13 +650,13 @@
   public void testFinalizeUpgrade() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = ".*Cannot finalize with no NameNode active";
     assertOutputMatches(message + newLine);
 
     cluster.getDfsCluster().transitionToActive(0);
     exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Finalize upgrade successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
@@ -670,7 +667,7 @@
     cluster.getDfsCluster().shutdownNameNode(1);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Finalize upgrade successful for .*" + newLine
         + "Finalize upgrade failed for .*" + newLine;
     assertOutputMatches(message);
@@ -682,7 +679,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().transitionToActive(1);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Finalize upgrade failed for .*" + newLine
         + "Finalize upgrade successful for .*" + newLine;
     assertOutputMatches(message);
@@ -694,7 +691,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = ".*2 exceptions.*";
     assertOutputMatches(message + newLine);
   }
@@ -747,7 +744,7 @@
 
     // Finalize the upgrade
     int exitCode = admin.run(new String[] {"-upgrade", "finalize"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = finalizeSuccessMsg + newLine + finalizeSuccessMsg + newLine;
     assertOutputMatches(message);
 
@@ -759,7 +756,7 @@
   private void verifyUpgradeQueryOutput(String message, int expected) throws
       Exception {
     int exitCode = admin.run(new String[] {"-upgrade", "query"});
-    assertEquals(err.toString().trim(), expected, exitCode);
+    assertEquals(expected, exitCode, err.toString().trim());
     assertOutputMatches(message);
   }
 
@@ -769,7 +766,7 @@
     cluster.getDfsCluster().shutdownNameNode(1);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-listOpenFiles"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
   }
 
   @Test (timeout = 30000)
@@ -778,7 +775,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().transitionToActive(1);
     int exitCode = admin.run(new String[] {"-listOpenFiles"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
   }
 
   @Test
@@ -787,7 +784,7 @@
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-listOpenFiles"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = ".*" + newLine + "List open files failed." + newLine;
     assertOutputMatches(message);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 0086134..48905a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -43,8 +41,8 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.test.MockitoUtil;
 import org.apache.hadoop.util.Shell;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
@@ -112,7 +110,7 @@
         FENCER_FALSE_COMMAND_WINDOWS : FENCER_FALSE_COMMAND_UNIX;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
     mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index aa048f8..8d4f5e69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -39,10 +37,9 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.io.Files;
@@ -67,7 +64,7 @@
 
   private int nn1Port;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf)
@@ -81,7 +78,7 @@
     nn1Port = cluster.getNameNodePort(0);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -170,9 +167,9 @@
 
     NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
     assertEquals(-1, runTool("-failover", "nn2", "nn1"));
-    assertTrue("Bad output: " + errOutput,
-        errOutput.contains("is not ready to become active: " +
-            "The NameNode is in safemode"));
+    assertTrue(errOutput.contains("is not ready to become active: " +
+        "The NameNode is in safemode"),
+        "Bad output: " + errOutput);
   }
     
   /**
@@ -277,8 +274,8 @@
     runTool("-transitionToActive", "nn1");
     runTool("-transitionToActive", "nn2");
 
-    assertFalse("Both namenodes cannot be active", nn1.isActiveState() 
-        && nn2.isActiveState());
+    assertFalse(nn1.isActiveState()
+        && nn2.isActiveState(), "Both namenodes cannot be active");
    
     /*  In this test case, we have deliberately shut down nn1 and this will
         cause HAAAdmin#isOtherTargetNodeActive to throw an Exception 
@@ -294,7 +291,7 @@
     assertFalse(cluster.isNameNodeUp(0));
     
     runTool("-transitionToActive", "nn2", "--forceactive");
-    assertTrue("Namenode nn2 should be active", nn2.isActiveState());
+    assertTrue(nn2.isActiveState(), "Namenode nn2 should be active");
   }
   
   private int runTool(String ... args) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index 0a7a87c..4437a34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayInputStream;
 import java.io.InputStream;
@@ -52,9 +52,9 @@
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.util.function.Supplier;
 
@@ -72,7 +72,7 @@
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   }
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new Configuration();
     // Specify the quorum per-nameservice, to ensure that these configs
@@ -129,7 +129,7 @@
     fs = HATestUtil.configureFailoverFs(cluster, conf);
   }
   
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -236,9 +236,9 @@
     DFSZKFailoverController zkfc = DFSZKFailoverController.create(
         conf);
 
-    assertEquals("Bind address not expected to be wildcard by default.",
-        zkfc.getRpcAddressToBindTo().getHostString(),
-        LOCALHOST_SERVER_ADDRESS);
+    assertEquals(zkfc.getRpcAddressToBindTo().getHostString(),
+        LOCALHOST_SERVER_ADDRESS,
+        "Bind address not expected to be wildcard by default.");
   }
 
   @Test(timeout=30000)
@@ -249,8 +249,8 @@
         conf);
     String addr = zkfc.getRpcAddressToBindTo().getHostString();
 
-    assertEquals("Bind address " + addr + " is not wildcard.",
-        addr, WILDCARD_ADDRESS);
+    assertEquals(addr, WILDCARD_ADDRESS,
+        "Bind address " + addr + " is not wildcard.");
   }
 
   /**
@@ -290,14 +290,14 @@
     System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
     int result = tool.run(
         new String[]{"-transitionToObserver", "-forcemanual", "nn2"});
-    assertEquals("State transition returned: " + result, 0, result);
+    assertEquals(0, result, "State transition returned: " + result);
     waitForHAState(1, HAServiceState.OBSERVER);
     // Answer "yes" to the prompt for --forcemanual
     System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
     result = tool.run(
         new String[]{"-transitionToStandby", "-forcemanual", "nn2"});
     System.setIn(inOriginial);
-    assertEquals("State transition returned: " + result, 0, result);
+    assertEquals(0, result, "State transition returned: " + result);
     waitForHAState(1, HAServiceState.STANDBY);
   }
 
@@ -313,7 +313,7 @@
       System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
       int result = tool.run(
           new String[]{"-transitionToObserver", "-forcemanual", "nn2"});
-      assertEquals("State transition returned: " + result, 0, result);
+      assertEquals(0, result, "State transition returned: " + result);
       waitForHAState(1, HAServiceState.OBSERVER);
       waitForZKFCState(thr2.zkfc, HAServiceState.OBSERVER);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
index 5c890a5..f8e2b44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
@@ -27,17 +27,17 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
 
 import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestDebugAdmin {
 
@@ -50,7 +50,7 @@
   private DebugAdmin admin;
   private DataNode datanode;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     final File testRoot = new File(TEST_ROOT_DIR);
     testRoot.delete();
@@ -63,7 +63,7 @@
     datanode = cluster.getDataNodes().get(0);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
index 5c1ec4a..2868c41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doReturn;
@@ -45,9 +43,9 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.tools.FakeRenewer;
-import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -90,19 +88,19 @@
 
     Credentials creds = Credentials.readTokenStorageFile(p, conf);
     Iterator<Token<?>> itr = creds.getAllTokens().iterator();
-    assertTrue("token not exist error", itr.hasNext());
+    assertTrue(itr.hasNext(), "token not exist error");
 
     Token<?> fetchedToken = itr.next();
-    Assert.assertArrayEquals("token wrong identifier error",
-        testToken.getIdentifier(), fetchedToken.getIdentifier());
-    Assert.assertArrayEquals("token wrong password error",
-        testToken.getPassword(), fetchedToken.getPassword());
+    Assertions.assertArrayEquals(testToken.getIdentifier(),
+        fetchedToken.getIdentifier(), "token wrong identifier error");
+    Assertions.assertArrayEquals(testToken.getPassword(),
+        fetchedToken.getPassword(), "token wrong password error");
 
     DelegationTokenFetcher.renewTokens(conf, p);
-    Assert.assertEquals(testToken, FakeRenewer.getLastRenewed());
+    Assertions.assertEquals(testToken, FakeRenewer.getLastRenewed());
 
     DelegationTokenFetcher.cancelTokens(conf, p);
-    Assert.assertEquals(testToken, FakeRenewer.getLastCanceled());
+    Assertions.assertEquals(testToken, FakeRenewer.getLastCanceled());
   }
 
   /**
@@ -116,7 +114,7 @@
     Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
     DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
     // When Token returned is null, TokenFile should not exist
-    Assert.assertFalse(p.getFileSystem(conf).exists(p));
+    Assertions.assertFalse(p.getFileSystem(conf).exists(p));
 
   }
 
@@ -135,18 +133,18 @@
       DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
       Credentials creds = Credentials.readTokenStorageFile(p, conf);
       Iterator<Token<?>> itr = creds.getAllTokens().iterator();
-      assertTrue("token not exist error", itr.hasNext());
+      assertTrue(itr.hasNext(), "token not exist error");
       final Token token = itr.next();
-      assertNotNull("Token should be there without renewer", token);
+      assertNotNull(token, "Token should be there without renewer");
 
       // Test compatibility of DelegationTokenFetcher.printTokensToString
       String expectedNonVerbose = "Token (HDFS_DELEGATION_TOKEN token 1 for " +
           System.getProperty("user.name") + " with renewer ) for";
       String resNonVerbose =
           DelegationTokenFetcher.printTokensToString(conf, p, false);
-      assertTrue("The non verbose output is expected to start with \""
-          + expectedNonVerbose +"\"",
-          resNonVerbose.startsWith(expectedNonVerbose));
+      assertTrue(resNonVerbose.startsWith(expectedNonVerbose),
+          "The non verbose output is expected to start with \""
+          + expectedNonVerbose + "\"");
       LOG.info(resNonVerbose);
       LOG.info(
           DelegationTokenFetcher.printTokensToString(conf, p, true));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestECAdmin.java
index 61846ef..6c7d0eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestECAdmin.java
@@ -21,10 +21,10 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -33,9 +33,7 @@
 import java.io.PrintStream;
 import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests some ECAdmin scenarios that are hard to test from
@@ -70,13 +68,13 @@
   public Timeout globalTimeout =
       new Timeout(300000, TimeUnit.MILLISECONDS);
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     System.setOut(new PrintStream(out));
     System.setErr(new PrintStream(err));
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     try {
       System.out.flush();
@@ -101,7 +99,7 @@
 
     cluster = DFSTestUtil.setupCluster(conf, numDataNodes, numRacks, 0);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughDataNodesMessage(RS_6_3, numDataNodes, expectedNumDataNodes);
   }
 
@@ -116,7 +114,7 @@
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
   }
 
@@ -131,7 +129,7 @@
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
   }
 
@@ -146,7 +144,7 @@
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
   }
 
@@ -154,11 +152,11 @@
   public void testRS63Good() throws Exception {
     cluster = DFSTestUtil.setupCluster(conf, 9, 3, 0);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Result of cluster topology verify " +
-        "should be logged correctly", out.toString().contains(
-        "The cluster setup can support EC policies: " + RS_6_3));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(out.toString().contains(
+        "The cluster setup can support EC policies: " + RS_6_3),
+        "Result of cluster topology verify should be logged correctly");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   @Test
@@ -166,11 +164,11 @@
     cluster = DFSTestUtil.setupCluster(conf, 9, 3, 0);
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Result of cluster topology verify " +
-            "should be logged correctly",
-        out.toString().contains("No erasure coding policy is given"));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(
+        out.toString().contains("No erasure coding policy is given"),
+        "Result of cluster topology verify should be logged correctly");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   @Test
@@ -184,16 +182,16 @@
     final int ret = runCommandWithParams("-enablePolicy", "-policy",
         testPolicy);
 
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Enabling policy should be logged", out.toString()
-        .contains("Erasure coding policy " + testPolicy + " is enabled"));
-    assertTrue("Warning about cluster topology should be printed",
-        err.toString().contains("Warning: The cluster setup does not support " +
-        "EC policy " + testPolicy + ". Reason:"));
-    assertTrue("Warning about cluster topology should be printed",
-        err.toString()
-            .contains(" racks are required for the erasure coding policies: " +
-                testPolicy));
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(out.toString()
+        .contains("Erasure coding policy " + testPolicy + " is enabled"),
+        "Enabling policy should be logged");
+    assertTrue(err.toString().contains("Warning: The cluster setup does not " +
+        "support EC policy " + testPolicy + ". Reason:"),
+        "Warning about cluster topology should be printed");
+    assertTrue(err.toString()
+        .contains(" racks are required for the erasure coding policies: " +
+            testPolicy), "Warning about cluster topology should be printed");
   }
 
   @Test
@@ -204,12 +202,12 @@
     final int ret = runCommandWithParams("-enablePolicy", "-policy",
         testPolicy);
 
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Enabling policy should be logged", out.toString()
-        .contains("Erasure coding policy " + testPolicy + " is enabled"));
-    assertFalse("Warning about cluster topology should not be printed",
-        out.toString().contains("Warning: The cluster setup does not support"));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+      assertEquals(0, ret, "Return value of the command is successful");
+      assertTrue(out.toString()
+              .contains("Erasure coding policy " + testPolicy + " is enabled"), "Enabling policy should be logged");
+      assertFalse(
+              out.toString().contains("Warning: The cluster setup does not support"), "Warning about cluster topology should not be printed");
+      assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   @Test
@@ -219,12 +217,12 @@
     final int ret = runCommandWithParams("-enablePolicy", "-policy",
         "NonExistentPolicy");
 
-    assertEquals("Return value of the command is unsuccessful", 2, ret);
-    assertFalse("Enabling policy should not be logged when " +
-        "it was unsuccessful", out.toString().contains("is enabled"));
-    assertTrue("Error message should be printed",
-        err.toString().contains("RemoteException: The policy name " +
-            "NonExistentPolicy does not exist"));
+    assertEquals(2, ret, "Return value of the command is unsuccessful");
+    assertFalse(out.toString().contains("is enabled"),
+        "Enabling policy should not be logged when it was unsuccessful");
+    assertTrue(err.toString().contains("RemoteException: The policy name " +
+        "NonExistentPolicy does not exist"),
+        "Error message should be printed");
   }
 
   @Test
@@ -234,30 +232,30 @@
     cluster = DFSTestUtil.setupCluster(conf, numDataNodes, numRacks, 0);
 
     int ret = runCommandWithParams("-verifyClusterSetup", "-policy", RS_3_2);
-    assertEquals("Return value of the command is not successful", 2, ret);
+      assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(RS_3_2, numRacks, 3);
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy",
         RS_10_4, RS_3_2);
-    assertEquals("Return value of the command is not successful", 2, ret);
+      assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughDataNodesMessage(RS_10_4 + ", " + RS_3_2,
         numDataNodes, 14);
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy",
         "invalidPolicy");
-    assertEquals("Return value of the command is not successful", -1, ret);
-    assertTrue("Error message should be logged", err.toString()
-        .contains("The given erasure coding policy invalidPolicy " +
-            "does not exist."));
+    assertEquals(-1, ret, "Return value of the command is not successful");
+    assertTrue(err.toString()
+        .contains("The given erasure coding policy invalidPolicy " +
+            "does not exist."), "Error message should be logged");
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy");
-    assertEquals("Return value of the command is not successful", -1, ret);
-    assertTrue("Error message should be logged", err.toString()
-        .contains("NotEnoughArgumentsException: Not enough arguments: " +
-            "expected 1 but got 0"));
+    assertEquals(-1, ret, "Return value of the command is not successful");
+    assertTrue(err.toString()
+        .contains("NotEnoughArgumentsException: Not enough arguments: " +
+            "expected 1 but got 0"), "Error message should be logged");
   }
 
   private void resetOutputs() {
@@ -268,25 +266,25 @@
   private void assertNotEnoughDataNodesMessage(String policy,
                                                int numDataNodes,
                                                int expectedNumDataNodes) {
-    assertTrue("Result of cluster topology verify " +
-        "should be logged correctly", out.toString()
-        .contains(expectedNumDataNodes + " DataNodes are required " +
-            "for the erasure coding policies: " +
-            policy + ". The number of DataNodes is only " + numDataNodes));
-    assertTrue("Error output should be empty",
-        err.toString().isEmpty());
+    assertTrue(out.toString()
+        .contains(expectedNumDataNodes + " DataNodes are required " +
+            "for the erasure coding policies: " +
+            policy + ". The number of DataNodes is only " + numDataNodes),
+        "Result of cluster topology verify should be logged correctly");
+    assertTrue(err.toString().isEmpty(),
+        "Error output should be empty");
   }
 
   private void assertNotEnoughRacksMessage(String policy,
                                            int numRacks,
                                            int expectedNumRacks) {
-    assertTrue("Result of cluster topology verify " +
-        "should be logged correctly", out.toString()
-        .contains(expectedNumRacks + " racks are required for " +
-            "the erasure coding policies: " +
-            policy + ". The number of racks is only " + numRacks));
-    assertTrue("Error output should be empty",
-        err.toString().isEmpty());
+    assertTrue(out.toString()
+        .contains(expectedNumRacks + " racks are required for " +
+            "the erasure coding policies: " +
+            policy + ". The number of racks is only " + numRacks),
+        "Result of cluster topology verify should be logged correctly");
+    assertTrue(err.toString().isEmpty(),
+        "Error output should be empty");
   }
 
   private int runCommandWithParams(String... args) throws Exception{
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
index 82ea34f..e17907e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
@@ -27,10 +27,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -148,10 +145,10 @@
       int ret = ToolRunner.run(new GetConf(conf, out, out), args);
       out.flush();
       System.err.println("Output: " + o.toString());
-      assertEquals("Expected " + (success?"success":"failure") +
-          " for args: " + Joiner.on(" ").join(args) + "\n" +
-          "Output: " + o.toString(),
-          success, ret == 0);
+      assertEquals(success, ret == 0,
+          "Expected " + (success ? "success" : "failure") +
+          " for args: " + Joiner.on(" ").join(args) + "\n" +
+          "Output: " + o.toString());
       return o.toString();
     } finally {
       o.close();
@@ -260,7 +257,7 @@
     for (Command cmd : Command.values()) {
       String arg = cmd.getName();
       CommandHandler handler = Command.getHandler(arg);
-      assertNotNull("missing handler: " + cmd, handler);
+      assertNotNull(handler, "missing handler: " + cmd);
       if (handler.key != null) {
         // First test with configuration missing the required key
         String[] args = {handler.key};
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetGroups.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetGroups.java
index 3d9631d..3430d92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetGroups.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetGroups.java
@@ -24,8 +24,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.tools.GetGroupsTestBase;
 import org.apache.hadoop.util.Tool;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Tests for the HDFS implementation of {@link GetGroups}
@@ -34,13 +34,13 @@
   
   private MiniDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public void setUpNameNode() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
   }
   
-  @After
+  @AfterEach
   public void tearDownNameNode() {
     if (cluster != null) {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
index ad77684..f5f15ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
@@ -31,9 +31,9 @@
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test StoragePolicyAdmin commands
@@ -46,7 +46,7 @@
   protected static MiniDFSCluster cluster;
   protected static FileSystem fs;
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -59,7 +59,7 @@
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void clusterShutdown() throws IOException{
     if(fs != null) {
       fs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
index e517d43..4787c1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
@@ -34,9 +34,9 @@
 import org.apache.hadoop.hdfs.server.namenode.sps.Context;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test StoragePolicySatisfy admin commands.
@@ -50,7 +50,7 @@
   private DistributedFileSystem dfs = null;
   private StoragePolicySatisfier externalSps = null;
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -74,7 +74,7 @@
     externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
-  @After
+  @AfterEach
   public void clusterShutdown() throws IOException{
     if(dfs != null) {
       dfs.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
index 3a94959..c45e920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
@@ -29,8 +29,8 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -40,7 +40,7 @@
  */
 public class TestViewFSStoragePolicyCommands extends TestStoragePolicyCommands {
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
     String clusterName = "cluster";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
index 2f821cf..00e146b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.hamcrest.CoreMatchers.containsString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -44,10 +44,10 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests DFSAdmin with ViewFileSystemOverloadScheme with configured mount links.
@@ -71,7 +71,7 @@
   /**
    * Sets up the configurations and starts the MiniDFSCluster.
    */
-  @Before
+  @BeforeEach
   public void startCluster() throws IOException {
     conf = new Configuration();
     conf.setInt(
@@ -86,10 +86,10 @@
     defaultFSURI =
         URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
     localTargetDir = new File(TEST_ROOT_DIR, "/root/");
-    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+    Assertions.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     try {
       System.out.flush();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java
index bc6eb50..783248f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.assertEquals;
-
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
@@ -28,6 +26,8 @@
 import java.util.List;
 import java.util.Scanner;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -43,10 +43,10 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests HDFS commands with ViewFileSystemOverloadScheme with configured mount
@@ -71,7 +71,7 @@
   /**
    * Sets up the configurations and starts the MiniDFSCluster.
    */
-  @Before
+  @BeforeEach
   public void startCluster() throws IOException {
     conf = new Configuration();
     conf.setInt(
@@ -86,10 +86,10 @@
     defaultFSURI =
         URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
     localTargetDir = new File(TEST_ROOT_DIR, "/root/");
-    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+    Assertions.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     try {
       System.out.flush();
@@ -165,7 +165,7 @@
       String msg =
           "DF was not calculated on all mounts. The left out mounts are: "
               + mounts;
-      assertEquals(msg, 0, mounts.size());
+        assertEquals(0, mounts.size(), msg);
     } finally {
       fsShell.close();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java
index f10205c..38cdf6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java
@@ -20,7 +20,7 @@
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -31,7 +31,7 @@
 public class TestWebHDFSStoragePolicyCommands
     extends TestStoragePolicyCommands {
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException, URISyntaxException {
     super.clusterSetUp();
     fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index 226e486..5883813 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -40,11 +40,11 @@
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.TemporaryFolder;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
@@ -84,12 +84,12 @@
   @Rule
   public final TemporaryFolder folder = new TemporaryFolder();
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     nnHelper.startCluster(buildDir + "/dfs/");
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     nnHelper.shutdownCluster();
   }
@@ -117,14 +117,14 @@
         runOev(editsParsedXML_caseInSensitive, editsReparsed, "binary", false));
 
 
-    // judgment time
-    assertTrue("Edits " + edits + " should have all op codes",
-        hasAllOpCodes(edits));
+    // judgment time
+    assertTrue(hasAllOpCodes(edits),
+        "Edits " + edits + " should have all op codes");
     LOG.info("Comparing generated file " + editsReparsed
         + " with reference file " + edits);
-    assertTrue(
-        "Generated edits and reparsed (bin to XML to bin) should be same",
-        filesEqualIgnoreTrailingZeros(edits, editsReparsed));
+    assertTrue(
+        filesEqualIgnoreTrailingZeros(edits, editsReparsed),
+        "Generated edits and reparsed (bin to XML to bin) should be same");
   }
 
 
@@ -153,9 +153,9 @@
     assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
     assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
 
-    // judgment time
-    assertTrue("Test round trip", FileUtils.contentEqualsIgnoreEOL(
-        new File(editsParsedXml), new File(editsParsedXml2), "UTF-8"));
+    // judgment time
+    assertTrue(FileUtils.contentEqualsIgnoreEOL(new File(editsParsedXml),
+        new File(editsParsedXml2), "UTF-8"), "Test round trip");
 
     os.close();
   }
@@ -177,15 +177,15 @@
     assertEquals(0,
         runOev(editsStoredParsedXml, editsStoredReparsed, "binary", false));
 
-    // judgement time
-    assertTrue("Edits " + editsStored + " should have all op codes",
-        hasAllOpCodes(editsStored));
-    assertTrue("Reference XML edits and parsed to XML should be same",
-        FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),
-            new File(editsStoredParsedXml), "UTF-8"));
-    assertTrue(
-        "Reference edits and reparsed (bin to XML to bin) should be same",
-        filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed));
+    // judgement time
+    assertTrue(hasAllOpCodes(editsStored),
+        "Edits " + editsStored + " should have all op codes");
+    assertTrue(FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),
+            new File(editsStoredParsedXml), "UTF-8"),
+        "Reference XML edits and parsed to XML should be same");
+    assertTrue(
+        filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed),
+        "Reference edits and reparsed (bin to XML to bin) should be same");
   }
 
   /**
@@ -299,11 +299,11 @@
     try {
       System.setOut(out);
       int status = new OfflineEditsViewer().run(new String[] { "-h" });
-      assertTrue("" + "Exit code returned for help option is incorrect",
-          status == 0);
-      Assert.assertFalse(
-          "Invalid Command error displayed when help option is passed.", bytes
-              .toString().contains("Error parsing command-line options"));
+      assertTrue(status == 0,
+          "Exit code returned for help option is incorrect");
+      Assertions.assertFalse(
+          bytes.toString().contains("Error parsing command-line options"),
+          "Invalid Command error displayed when help option is passed.");
     } finally {
       System.setOut(oldOut);
       IOUtils.closeStream(out);
@@ -322,7 +322,7 @@
     if (oev.go(editFilename, outFilename, "stats", new Flags(), visitor) == 0) {
       statisticsStr = visitor.getStatisticsString();
     }
-    Assert.assertNotNull(statisticsStr);
+    Assertions.assertNotNull(statisticsStr);
 
     String str;
     Long count;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 5c91530..d9a9f85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -98,10 +98,10 @@
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -131,9 +131,7 @@
 import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA;
 import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_CODEC_NAME;
 import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_OPTION;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -163,7 +161,7 @@
   // data structure and store its fsimage location.
   // We only want to generate the fsimage file once and use it for
   // multiple tests.
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     defaultTimeZone = TimeZone.getDefault();
     TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
@@ -366,7 +364,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     FileUtils.deleteQuietly(tempDir);
     if (originalFsimage != null && originalFsimage.exists()) {
@@ -506,14 +504,14 @@
         if (currentInodeName != null && currentInodeName.length() > 0) {
           if (currentBlockType != null && currentBlockType.equalsIgnoreCase(
               BlockType.STRIPED.name())) {
-            Assert.assertEquals("INode '"
-                    + currentInodeName + "' has unexpected EC Policy!",
-                Byte.parseByte(currentECPolicy),
-                SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
-            Assert.assertEquals("INode '"
-                    + currentInodeName + "' has unexpected replication!",
-                currentRepl,
-                Short.toString(INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS));
+            Assertions.assertEquals(
+                Byte.parseByte(currentECPolicy),
+                SystemErasureCodingPolicies.XOR_2_1_POLICY_ID,
+                "INode '" + currentInodeName + "' has unexpected EC Policy!");
+            Assertions.assertEquals(
+                currentRepl,
+                Short.toString(INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS),
+                "INode '" + currentInodeName + "' has unexpected replication!");
           }
         }
         isInode = false;
@@ -829,8 +827,8 @@
     int status =
         OfflineImageViewerPB.run(new String[] { "-i",
             originalFsimage.getAbsolutePath(), "-o", "-", "-p", "invalid" });
-    assertTrue("Exit code returned for invalid processor option is incorrect",
-        status != 0);
+    assertTrue(status != 0,
+        "Exit code returned for invalid processor option is incorrect");
   }
 
   @Test
@@ -841,17 +839,17 @@
     try {
       System.setOut(out);
       int status = OfflineImageViewerPB.run(new String[] { "-h" });
-      assertTrue("Exit code returned for help option is incorrect", status == 0);
-      Assert.assertFalse(
-          "Invalid Command error displayed when help option is passed.", bytes
-              .toString().contains("Error parsing command-line options"));
+      assertTrue(status == 0, "Exit code returned for help option is incorrect");
+      Assertions.assertFalse(
+          bytes.toString().contains("Error parsing command-line options"),
+          "Invalid Command error displayed when help option is passed.");
       status =
           OfflineImageViewerPB.run(new String[] { "-h", "-i",
               originalFsimage.getAbsolutePath(), "-o", "-", "-p",
               "FileDistribution", "-maxSize", "512", "-step", "8" });
-      Assert.assertTrue(
-          "Exit code returned for help with other option is incorrect",
-          status == -1);
+      Assertions.assertTrue(
+          status == -1,
+          "Exit code returned for help with other option is incorrect");
     } finally {
       System.setOut(oldOut);
       IOUtils.closeStream(out);
@@ -867,8 +865,8 @@
       String tempDelimitedDirPath = new FileSystemTestHelper().
           getTestRootDir() + "/" + tempDelimitedDirName;
       tempDelimitedDir = new File(tempDelimitedDirPath);
-      Assert.assertTrue("Couldn't create temp directory!",
-          tempDelimitedDir.mkdirs());
+      Assertions.assertTrue(tempDelimitedDir.mkdirs(),
+          "Couldn't create temp directory!");
       testPBDelimitedWriter(tempDelimitedDirPath);
     } finally {
       if (tempDelimitedDir != null) {
@@ -1138,7 +1136,7 @@
     }
     // The XML file we wrote based on the re-created fsimage should be the
     // same as the one we dumped from the original fsimage.
-    Assert.assertEquals("",
+    Assertions.assertEquals("",
       GenericTestUtils.getFilesDiff(reverseImageXml, reverseImage2Xml));
   }
 
@@ -1167,7 +1165,7 @@
     try {
       OfflineImageReconstructor.run(imageWrongVersion.getAbsolutePath(),
           imageWrongVersion.getAbsolutePath() + ".out"); 
-      Assert.fail("Expected OfflineImageReconstructor to fail with " +
+      Assertions.fail("Expected OfflineImageReconstructor to fail with " +
           "version mismatch.");
     } catch (Throwable t) {
       GenericTestUtils.assertExceptionContains("Layout version mismatch.", t);
@@ -1233,7 +1231,7 @@
       // and don't need to do the following operations.
       OfflineImageViewer.main(new String[] {"-i", "-", "-o", "-", "-p",
           "FileDistribution", "-maxSize", "512", "-step", "8", "-h"});
-      Assert.assertFalse(bytes.toString().contains(
+      Assertions.assertFalse(bytes.toString().contains(
           "Error parsing command-line options: "));
     } finally {
       System.setOut(oldOut);
@@ -1254,7 +1252,7 @@
               "FileDistribution", "-maxSize", "512", "-step", "8",
               "-format"});
       assertEquals(0, status);
-      Assert.assertTrue(bytes.toString().contains("(0 B, 8 B]"));
+      Assertions.assertTrue(bytes.toString().contains("(0 B, 8 B]"));
     } finally {
       System.setOut(oldOut);
       IOUtils.closeStream(out);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
index 4955846..eb3cfcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
@@ -50,9 +50,9 @@
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.DefaultHandler;
@@ -73,7 +73,7 @@
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.fs.permission.FsAction.NONE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Tests OfflineImageViewer if the input fsimage has HDFS ACLs
@@ -94,7 +94,7 @@
    * We only want to generate the fsimage file once and use it for
    * multiple tests.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -165,7 +165,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
index 360ed56..17616c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.HttpURLConnection;
@@ -36,9 +37,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests GETCONTENTSUMMARY operation for WebImageViewer
@@ -59,7 +60,7 @@
    * data structure and store its fsimage location. We only want to generate
    * the fsimage file once and use it for multiple tests.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     Configuration conf = new Configuration();
@@ -117,7 +118,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java
index 782feb8..055191c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java
@@ -26,15 +26,15 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.ALLSSD_STORAGE_POLICY_NAME;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -59,7 +59,7 @@
    * Create a populated namespace for later testing. Save its contents to a
    * data structure and store its fsimage location.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -133,7 +133,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
index 74069b0..ba7455e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -44,9 +44,9 @@
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests OfflineImageViewer if the input fsimage has XAttributes
@@ -65,7 +65,7 @@
    * structure and store its fsimage location. We only want to generate the
    * fsimage file once and use it for multiple tests.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     Configuration conf = new Configuration();
@@ -103,7 +103,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();
@@ -127,10 +127,10 @@
 
       String content = IOUtils.toString(connection.getInputStream());
 
-      assertTrue("Missing user.attr1 in response ",
-          content.contains("user.attr1"));
-      assertTrue("Missing user.attr2 in response ",
-          content.contains("user.attr2"));
+      assertTrue(content.contains("user.attr1"),
+          "Missing user.attr1 in response ");
+      assertTrue(content.contains("user.attr2"),
+          "Missing user.attr2 in response ");
 
     }
   }
@@ -152,10 +152,10 @@
       assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
       String content = IOUtils.toString(connection.getInputStream());
 
-      assertTrue("Missing user.attr1 in response ",
-          content.contains("user.attr1"));
-      assertTrue("Missing user.attr2 in response ",
-          content.contains("user.attr2"));
+      assertTrue(content.contains("user.attr1"),
+          "Missing user.attr1 in response ");
+      assertTrue(content.contains("user.attr2"),
+          "Missing user.attr2 in response ");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index 187b297..e0d29a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -40,9 +40,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestOfflineImageViewerWithStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
@@ -56,7 +56,7 @@
   private final int stripesPerBlock = 3;
   private final int blockSize = cellSize * stripesPerBlock;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     int numDNs = dataBlocks + parityBlocks + 2;
     Configuration conf = new Configuration();
@@ -72,7 +72,7 @@
     fs.mkdirs(eczone);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -148,23 +148,23 @@
     INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
     assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
         fileNode.getErasureCodingPolicyID());
-    assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
+    assertTrue(fileNode.getBlocks().length > 0, "Invalid block size");
     long actualFileSize = 0;
     for (BlockInfo blockInfo : fileNode.getBlocks()) {
-      assertTrue("Didn't find block striped information",
-          blockInfo instanceof BlockInfoStriped);
+      assertTrue(blockInfo instanceof BlockInfoStriped,
+          "Didn't find block striped information");
       actualFileSize += blockInfo.getNumBytes();
     }
 
-    assertEquals("Wrongly computed file size contains striped blocks",
-        expectedFileSize, actualFileSize);
+    assertEquals(expectedFileSize, actualFileSize,
+        "Wrongly computed file size contains striped blocks");
 
     // Verify space consumed present in filestatus
     String EXPECTED_FILE_SIZE = "\"length\":"
         + String.valueOf(expectedFileSize);
-    assertTrue(
-        "Wrongly computed file size contains striped blocks, file status:"
-            + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE,
-        fileStatus.contains(EXPECTED_FILE_SIZE));
+    assertTrue(
+        fileStatus.contains(EXPECTED_FILE_SIZE),
+        "Wrongly computed file size contains striped blocks, file status:"
+            + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestPBImageCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestPBImageCorruption.java
index 092d3a1..95cae30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestPBImageCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestPBImageCorruption.java
@@ -19,7 +19,7 @@
 
 import org.junit.Test;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Unit tests for PBImageCorruptionType, CorruptionEntryBuilder and
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
index e171e2b..12d555a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
@@ -40,7 +40,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class HostsFileWriter {
   private FileSystem localFileSys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
index 144f990..cb551dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs.util;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -33,9 +30,9 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
@@ -52,7 +49,7 @@
   @Rule
   public ExpectedException exception = ExpectedException.none();
   
-  @Before
+  @BeforeEach
   public void cleanupTestDir() throws IOException {
     assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
     FileUtil.fullyDeleteContents(TEST_DIR);
@@ -80,11 +77,11 @@
    */
   @Test
   public void testOverwriteFile() throws IOException {
-    assertTrue("Creating empty dst file", DST_FILE.createNewFile());
+    assertTrue(DST_FILE.createNewFile(), "Creating empty dst file");
     
     OutputStream fos = new AtomicFileOutputStream(DST_FILE);
-    
-    assertTrue("Empty file still exists", DST_FILE.exists());
+
+    assertTrue(DST_FILE.exists(), "Empty file still exists");
     fos.write(TEST_STRING.getBytes());
     fos.flush();
     
@@ -121,9 +118,9 @@
     
     // Should not have touched original file
     assertEquals(TEST_STRING_2, DFSTestUtil.readFile(DST_FILE));
-    
-    assertEquals("Temporary file should have been cleaned up",
-        DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()));
+
+    assertEquals(DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()),
+        "Temporary file should have been cleaned up");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java
index c57dc970..846736f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java
@@ -23,17 +23,18 @@
 
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestBestEffortLongFile {
 
   private static final File FILE = new File(MiniDFSCluster.getBaseDirectory() +
       File.separatorChar + "TestBestEffortLongFile");
 
-  @Before
+  @BeforeEach
   public void cleanup() {
     if (FILE.exists()) {
       assertTrue(FILE.delete());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
index cf02180..7e884c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
@@ -22,11 +22,11 @@
 
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test for JSON based HostsFileReader.
@@ -44,11 +44,11 @@
   private final File legacyFile =
       new File(TESTCACHEDATADIR, "legacy.dfs.hosts.json");
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     // Delete test file after running tests
     newFile.delete();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
index 2aba515..dd9033b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
@@ -16,7 +16,9 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs.util;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -25,8 +27,6 @@
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
-import org.junit.Test;
-
 public class TestCyclicIteration {
   @Test
   public void testCyclicIteration() throws Exception {
@@ -58,7 +58,7 @@
       //verify results
       for(int i = 0; i < integers.length; i++) {
         final int j = ((start+2)/2 + i)%integers.length;
-        assertEquals("i=" + i + ", j=" + j, iteration.get(i), integers[j]);
+        assertEquals(iteration.get(i), integers[j], "i=" + i + ", j=" + j);
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
index 9c6839c..15a87a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
@@ -29,8 +29,8 @@
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.hdfs.util.Diff.Container;
 import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /**
  * Test {@link Diff} with {@link INode}.
@@ -192,7 +192,7 @@
           final int j = Diff.search(previous, inode.getKey());
           final INode expected = j < 0? null: previous.get(j);
           // must be the same object (equals is not enough)
-          Assert.assertTrue(computed == expected);
+          Assertions.assertTrue(computed == expected);
         }
 
         {// test accessCurrent
@@ -208,7 +208,7 @@
           final int j = Diff.search(current, inode.getKey());
           final INode expected = j < 0? null: current.get(j);
           // must be the same object (equals is not enough)
-          Assert.assertTrue(computed == expected);
+          Assertions.assertTrue(computed == expected);
         }
       }
     }
@@ -250,7 +250,7 @@
   static void create(INode inode, final List<INode> current,
       Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
-    Assert.assertTrue(i < 0);
+    Assertions.assertTrue(i < 0);
     current.add(-i - 1, inode);
     if (diff != null) {
       //test undo with 1/UNDO_TEST_P probability
@@ -303,7 +303,7 @@
   static void modify(INode inode, final List<INode> current,
       Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
-    Assert.assertTrue(i >= 0);
+    Assertions.assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
     final INodeDirectory newinode = new INodeDirectory(oldinode, false,
       oldinode.getFeatures());
@@ -333,6 +333,6 @@
   }
   
   static void assertDiff(String s, Diff<byte[], INode> diff) {
-    Assert.assertEquals(s, diff.toString());
+    Assertions.assertEquals(s, diff.toString());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
index 6507bf3..3bf0984 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
@@ -17,12 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -33,8 +28,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestLightWeightHashSet{
 
@@ -45,7 +40,7 @@
   private LightWeightHashSet<Integer> set;
   private Random rand;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     float maxF = LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR;
     float minF = LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
index 6c55f28..1097962 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -30,8 +27,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestLightWeightLinkedSet {
 
@@ -42,7 +39,7 @@
   private LightWeightLinkedSet<Integer> set;
   private Random rand;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     float maxF = LightWeightLinkedSet.DEFAULT_MAX_LOAD_FACTOR;
     float minF = LightWeightLinkedSet.DEFAUT_MIN_LOAD_FACTOR;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
index 35fa46d..79f39be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -30,8 +28,8 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestMD5FileUtils {
   private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
@@ -43,7 +41,7 @@
     DFSTestUtil.generateSequentialBytes(0, TEST_DATA_LEN);
   private static final MD5Hash TEST_MD5 = MD5Hash.digest(TEST_DATA);
   
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     FileUtil.fullyDelete(TEST_DIR);
     assertTrue(TEST_DIR.mkdirs());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java
index 6444778..135fcf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.util;
 
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * Verify ReferenceCount map in concurrent scenarios.
@@ -37,28 +37,28 @@
     ReferenceCountMap<AclFeature> countMap = new ReferenceCountMap<>();
     countMap.put(aclFeature1);
     countMap.put(aclFeature2);
-    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature2));
+    Assertions.assertEquals(1, countMap.getReferenceCount(aclFeature1));
+    Assertions.assertEquals(1, countMap.getReferenceCount(aclFeature2));
 
     countMap.put(aclFeature1);
     countMap.put(aclFeature2);
-    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature2));
+    Assertions.assertEquals(2, countMap.getReferenceCount(aclFeature1));
+    Assertions.assertEquals(2, countMap.getReferenceCount(aclFeature2));
 
     countMap.put(aclFeature1);
-    Assert.assertEquals(3, countMap.getReferenceCount(aclFeature1));
+    Assertions.assertEquals(3, countMap.getReferenceCount(aclFeature1));
     countMap.put(aclFeature1);
-    Assert.assertEquals(4, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature2));
+    Assertions.assertEquals(4, countMap.getReferenceCount(aclFeature1));
+    Assertions.assertEquals(2, countMap.getReferenceCount(aclFeature2));
 
     //Delete operations:
     countMap.remove(aclFeature1);
     countMap.remove(aclFeature2);
-    Assert.assertEquals(3, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature2));
+    Assertions.assertEquals(3, countMap.getReferenceCount(aclFeature1));
+    Assertions.assertEquals(1, countMap.getReferenceCount(aclFeature2));
 
     //Verify unique elements in map
-    Assert.assertEquals(2, countMap.getUniqueElementsSize());
+    Assertions.assertEquals(2, countMap.getUniqueElementsSize());
   }
 
   @Test
@@ -73,15 +73,15 @@
 
     putThread1.join();
     putThread2.join();
-    Assert.assertEquals(2 * LOOP_COUNTER,
+    Assertions.assertEquals(2 * LOOP_COUNTER,
         countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(2 * LOOP_COUNTER,
+    Assertions.assertEquals(2 * LOOP_COUNTER,
         countMap.getReferenceCount(aclFeature2));
 
     removeThread1.start();
     removeThread1.join();
-    Assert.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2));
+    Assertions.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature1));
+    Assertions.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2));
   }
 
   class PutThread extends Thread {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
index 9d8c82c..737b87d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
@@ -31,17 +31,15 @@
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;
 
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 
 import java.nio.ByteBuffer;
 import java.util.Random;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Need to cover the following combinations:
@@ -100,7 +98,7 @@
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   public void setup(){
     blockGroupSizes = new int[]{1, getDelta(cellSize), cellSize,
         getDelta(dataBlocks) * cellSize,
@@ -285,8 +283,8 @@
             if (hashIntToByte(brStart + i) != assembled.get(i)) {
               System.out.println("Oops");
             }
-            assertEquals("Byte at " + (brStart + i) + " should be the same",
-                hashIntToByte(brStart + i), assembled.get(i));
+            assertEquals(hashIntToByte(brStart + i), assembled.get(i),
+                "Byte at " + (brStart + i) + " should be the same");
           }
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
index 16df254..eaf51e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
@@ -18,16 +18,16 @@
 package org.apache.hadoop.hdfs.util;
 
 import org.apache.hadoop.hdfs.util.XMLUtils.UnmanglingError;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 public class TestXMLUtils {
   private static void testRoundTripImpl(String str, String expectedMangled,
                                     boolean encodeEntityRefs) {
     String mangled = XMLUtils.mangleXmlString(str, encodeEntityRefs);
-    Assert.assertEquals(expectedMangled, mangled);
+    Assertions.assertEquals(expectedMangled, mangled);
     String unmangled = XMLUtils.unmangleXmlString(mangled, encodeEntityRefs);
-    Assert.assertEquals(str, unmangled);
+    Assertions.assertEquals(str, unmangled);
   }
 
   private static void testRoundTrip(String str, String expectedMangled) {
@@ -65,13 +65,13 @@
   public void testInvalidSequence() throws Exception {
     try {
       XMLUtils.unmangleXmlString("\\000g;foo", false);
-      Assert.fail("expected an unmangling error");
+      Assertions.fail("expected an unmangling error");
     } catch (UnmanglingError e) {
       // pass through
     }
     try {
       XMLUtils.unmangleXmlString("\\0", false);
-      Assert.fail("expected an unmangling error");
+      Assertions.fail("expected an unmangling error");
     } catch (UnmanglingError e) {
       // pass through
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
index 7f88416..4aa7fa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
@@ -22,14 +22,13 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 public class TestAuthFilter {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index e88937a..e30b8ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -27,6 +27,8 @@
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
+import static org.junit.jupiter.api.Assertions.*;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSMainOperationsBaseTest;
@@ -45,13 +47,13 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
-  {
+  static {
     GenericTestUtils.setLogLevel(ExceptionHandler.LOG, Level.TRACE);
   }
 
@@ -68,7 +70,7 @@
     return fileSystem;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() {
     final Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
@@ -100,7 +102,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -134,7 +136,7 @@
     Assert.assertFalse(exists(fSys, paths[2]));
 
     FileStatus fileStatus = fSys.getFileStatus(catPath);
-    Assert.assertEquals(1024*4, fileStatus.getLen());
+    assertEquals(1024*4, fileStatus.getLen());
   }
 
   @Test
@@ -152,16 +154,16 @@
 
     boolean isReady = fSys.truncate(file, newLength);
 
-    Assert.assertTrue("Recovery is not expected.", isReady);
+    assertTrue(isReady, "Recovery is not expected.");
 
     FileStatus fileStatus = fSys.getFileStatus(file);
-    Assert.assertEquals(fileStatus.getLen(), newLength);
+    assertEquals(fileStatus.getLen(), newLength);
     AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());
 
     ContentSummary cs = fSys.getContentSummary(dir);
-    Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
-        newLength * repl);
-    Assert.assertTrue("Deleted", fSys.delete(dir, true));
+    assertEquals(cs.getSpaceConsumed(),
+        newLength * repl, "Bad disk space usage");
+    assertTrue(fSys.delete(dir, true), "Deleted");
   }
 
   // Test that WebHdfsFileSystem.jsonParse() closes the connection's input
@@ -196,11 +198,11 @@
     doReturn(myIn).when(spyConn).getInputStream();
 
     try {
-      Assert.assertFalse(closedInputStream);
+      assertFalse(closedInputStream);
       WebHdfsFileSystem.jsonParse(spyConn, false);
-      Assert.assertTrue(closedInputStream);
+      assertTrue(closedInputStream);
     } catch(IOException ioe) {
-      junit.framework.TestCase.fail();
+      fail();
     }
     conn.disconnect();
   }
@@ -218,7 +220,7 @@
     Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
     try {
       fSys.mkdirs(testSubDir);
-      Assert.fail("Should throw IOException.");
+      fail("Should throw IOException.");
     } catch (IOException e) {
       // expected
     }
@@ -231,7 +233,7 @@
     Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
     try {
       fSys.mkdirs(testDeepSubDir);
-      Assert.fail("Should throw IOException.");
+      fail("Should throw IOException.");
     } catch (IOException e) {
       // expected
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
index 34bb336..f05577b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
@@ -33,10 +33,10 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestHttpsFileSystem {
   private static final String BASEDIR =
@@ -49,7 +49,7 @@
   private static String sslConfDir;
   private static String nnAddr;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
@@ -78,7 +78,7 @@
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -94,9 +94,9 @@
     FSDataOutputStream os = fs.create(f);
     os.write(23);
     os.close();
-    Assert.assertTrue(fs.exists(f));
+    Assertions.assertTrue(fs.exists(f));
     InputStream is = fs.open(f);
-    Assert.assertEquals(23, is.read());
+    Assertions.assertEquals(23, is.read());
     is.close();
     fs.close();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 839f894..c3d8da1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -21,8 +21,8 @@
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -53,9 +53,8 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 
-import org.junit.Assert;
-import org.junit.Test;
-
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 
@@ -106,9 +105,9 @@
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
-    Assert.assertEquals(status.getErasureCodingPolicy(),
+    Assertions.assertEquals(status.getErasureCodingPolicy(),
         s2.getErasureCodingPolicy());
-    Assert.assertEquals(fstatus, fs2);
+    Assertions.assertEquals(fstatus, fs2);
   }
 
   /**
@@ -172,7 +171,7 @@
         .path(DFSUtil.string2Bytes("foo"))
         .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
         .build();
-    Assert.assertTrue(status.getErasureCodingPolicy() == null);
+    Assertions.assertTrue(status.getErasureCodingPolicy() == null);
 
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
@@ -185,7 +184,7 @@
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
 
-    Assert.assertEquals(fstatus, fs2);
+    Assertions.assertEquals(fstatus, fs2);
   }
   
   @Test
@@ -237,15 +236,15 @@
     response.put("cacheUsed", 321l);
 
     DatanodeInfo di = JsonUtilClient.toDatanodeInfo(response);
-    Assert.assertEquals(name, di.getXferAddr());
+    Assertions.assertEquals(name, di.getXferAddr());
 
     // The encoded result should contain name, ipAddr and xferPort.
     Map<String, Object> r = JsonUtil.toJsonMap(di);
-    Assert.assertEquals(name, r.get("name"));
-    Assert.assertEquals("127.0.0.1", r.get("ipAddr"));
+    Assertions.assertEquals(name, r.get("name"));
+    Assertions.assertEquals("127.0.0.1", r.get("ipAddr"));
     // In this test, it is Integer instead of Long since json was not actually
     // involved in constructing the map.
-    Assert.assertEquals(1004, (int)(Integer)r.get("xferPort"));
+    Assertions.assertEquals(1004, (int)(Integer)r.get("xferPort"));
 
     // Invalid names
     String[] badNames = {"127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123"};
@@ -281,8 +280,8 @@
     aclStatusBuilder.addEntries(aclSpec);
     aclStatusBuilder.stickyBit(false);
 
-    Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
-        JsonUtilClient.toAclStatus(json));
+    Assertions.assertEquals(aclStatusBuilder.build(),
+        JsonUtilClient.toAclStatus(json), "Should be equal");
   }
 
   @Test
@@ -299,7 +298,7 @@
             aclEntry(ACCESS, GROUP, READ_WRITE));
 
     aclStatusBuilder.addEntries(aclSpec);
-    Assert.assertEquals(jsonString,
+    Assertions.assertEquals(jsonString,
         JsonUtil.toJsonString(aclStatusBuilder.build()));
 
   }
@@ -334,7 +333,7 @@
         .snapshotDirectoryCount(snapshotDirectoryCount)
         .snapshotSpaceConsumed(snapshotSpaceConsumed).build();
 
-    Assert.assertEquals(jsonString, JsonUtil.toJsonString(contentSummary));
+    Assertions.assertEquals(jsonString, JsonUtil.toJsonString(contentSummary));
   }
 
   @Test
@@ -350,7 +349,7 @@
     xAttrs.add(xAttr1);
     xAttrs.add(xAttr2);
     
-    Assert.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs, 
+    Assertions.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs, 
         XAttrCodec.HEX));
   }
   
@@ -370,11 +369,11 @@
     Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
     Map<String, byte[]> parsedXAttrMap = JsonUtilClient.toXAttrs(json);
     
-    Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
+    Assertions.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
     Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
     while(iter.hasNext()) {
       Entry<String, byte[]> entry = iter.next();
-      Assert.assertArrayEquals(entry.getValue(), 
+      Assertions.assertArrayEquals(entry.getValue(), 
           parsedXAttrMap.get(entry.getKey()));
     }
   }
@@ -388,13 +387,13 @@
 
     // Get xattr: user.a2
     byte[] value = JsonUtilClient.getXAttr(json, "user.a2");
-    Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
+    Assertions.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
   }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {
       JsonUtilClient.toDatanodeInfo(map);
-      Assert.fail("Exception not thrown against bad input.");
+      Assertions.fail("Exception not thrown against bad input.");
     } catch (Exception e) {
       // expected
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 68087c4..35a6310 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -18,23 +18,89 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
-import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
-import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.MapType;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
+import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
+import org.apache.hadoop.hdfs.web.resources.Param;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.test.Whitebox;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.util.DataChecksum;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import java.io.EOFException;
 import java.io.File;
@@ -58,93 +124,25 @@
 import java.util.NoSuchElementException;
 import java.util.Random;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
-import org.apache.hadoop.test.LambdaTestUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.BlockStoragePolicySpi;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.permission.AclEntryType;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
-import org.apache.hadoop.hdfs.TestFileCreation;
-import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
-import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
-import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
-import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
-import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
-import org.apache.hadoop.hdfs.web.resources.LengthParam;
-import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
-import org.apache.hadoop.hdfs.web.resources.OffsetParam;
-import org.apache.hadoop.hdfs.web.resources.Param;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.RetriableException;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.test.Whitebox;
-import org.slf4j.event.Level;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.MapType;
-
+import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.Mockito.doReturn;
@@ -164,7 +162,7 @@
 
   private static MiniDFSCluster cluster = null;
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (null != cluster) {
       cluster.shutdown();
@@ -225,7 +223,7 @@
     final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
         WebHdfsConstants.WEBHDFS_SCHEME);
     final Path dir = new Path("/test/largeFile");
-    Assert.assertTrue(fs.mkdirs(dir));
+    Assertions.assertTrue(fs.mkdirs(dir));
 
     final byte[] data = new byte[1 << 20];
     RANDOM.nextBytes(data);
@@ -251,7 +249,7 @@
     }
     t.end(fileLength);
 
-    Assert.assertEquals(fileLength, fs.getFileStatus(p).getLen());
+    Assertions.assertEquals(fileLength, fs.getFileStatus(p).getLen());
 
     final long smallOffset = RANDOM.nextInt(1 << 20) + (1 << 20);
     final long largeOffset = fileLength - smallOffset;
@@ -269,7 +267,7 @@
       int j = (int)(offset % actual.length);
       for(int i = 0; i < n; i++) {
         if (expected[j] != actual[i]) {
-          Assert.fail("expected[" + j + "]=" + expected[j]
+          Assertions.fail("expected[" + j + "]=" + expected[j]
               + " != actual[" + i + "]=" + actual[i]
               + ", offset=" + offset + ", remaining=" + remaining + ", n=" + n);
         }
@@ -363,12 +361,12 @@
             FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
                 WebHdfsConstants.WEBHDFS_SCHEME);
             Path d = new Path("/my-dir");
-            Assert.assertTrue(fs.mkdirs(d));
+            Assertions.assertTrue(fs.mkdirs(d));
             // Iterator should have no items when dir is empty
             RemoteIterator<FileStatus> it = fs.listStatusIterator(d);
             assertFalse(it.hasNext());
             Path p = new Path(d, "file-" + 0);
-            Assert.assertTrue(fs.createNewFile(p));
+            Assertions.assertTrue(fs.createNewFile(p));
             // Iterator should have an item when dir is not empty
             it = fs.listStatusIterator(d);
             assertTrue(it.hasNext());
@@ -376,11 +374,11 @@
             assertFalse(it.hasNext());
             for (int i = 1; i < listLimit * 3; i++) {
               p = new Path(d, "file-" + i);
-              Assert.assertTrue(fs.createNewFile(p));
+              Assertions.assertTrue(fs.createNewFile(p));
             }
             // Check the FileStatus[] listing
             FileStatus[] statuses = fs.listStatus(d);
-            Assert.assertEquals(listLimit * 3, statuses.length);
+            Assertions.assertEquals(listLimit * 3, statuses.length);
             // Check the iterator-based listing
             GenericTestUtils.setLogLevel(WebHdfsFileSystem.LOG, Level.TRACE);
             GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG,
@@ -389,18 +387,18 @@
             int count = 0;
             while (it.hasNext()) {
               FileStatus stat = it.next();
-              assertEquals("FileStatuses not equal", statuses[count], stat);
+              assertEquals(statuses[count], stat, "FileStatuses not equal");
               count++;
             }
-            assertEquals("Different # of statuses!", statuses.length, count);
+            assertEquals(statuses.length, count, "Different # of statuses!");
             // Do some more basic iterator tests
             it = fs.listStatusIterator(d);
             // Try advancing the iterator without calling hasNext()
             for (int i = 0; i < statuses.length; i++) {
               FileStatus stat = it.next();
-              assertEquals("FileStatuses not equal", statuses[i], stat);
+              assertEquals(statuses[i], stat, "FileStatuses not equal");
             }
-            assertFalse("No more items expected", it.hasNext());
+            assertFalse(it.hasNext(), "No more items expected");
             // Try doing next when out of items
             try {
               it.next();
@@ -488,7 +486,7 @@
             FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
                 WebHdfsConstants.WEBHDFS_SCHEME);
             Path d = new Path("/my-dir");
-            Assert.assertTrue(fs.mkdirs(d));
+            Assertions.assertTrue(fs.mkdirs(d));
             // Test also specifying a default ACL with a numeric username
             // and another of a groupname with '@'
             fs.modifyAclEntries(d, ImmutableList.of(new AclEntry.Builder()
@@ -515,7 +513,7 @@
       FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
           WebHdfsConstants.WEBHDFS_SCHEME);
       fs.create(new Path("/testnodatanode"));
-      Assert.fail("No exception was thrown");
+      Assertions.fail("No exception was thrown");
     } catch (IOException ex) {
       GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
     }
@@ -544,7 +542,7 @@
     assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
     webHdfs.createSnapshot(bar, "s1");
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    Assertions.assertTrue(webHdfs.exists(s1path));
     SnapshottableDirectoryStatus[] snapshottableDirs =
         dfs.getSnapshottableDirListing();
     assertEquals(1, snapshottableDirs.length);
@@ -605,43 +603,41 @@
 
     FileStatus expectedECDirStatus = dfs.getFileStatus(ecDir);
     FileStatus actualECDirStatus = webHdfs.getFileStatus(ecDir);
-    Assert.assertEquals(expectedECDirStatus.isErasureCoded(),
+    Assertions.assertEquals(expectedECDirStatus.isErasureCoded(),
         actualECDirStatus.isErasureCoded());
     ContractTestUtils.assertErasureCoded(dfs, ecDir);
-    assertTrue(
+    assertTrue(actualECDirStatus.toString().contains("isErasureCoded=true"),
         ecDir + " should have erasure coding set in "
-            + "FileStatus#toString(): " + actualECDirStatus,
-        actualECDirStatus.toString().contains("isErasureCoded=true"));
+            + "FileStatus#toString(): " + actualECDirStatus);
 
     FileStatus expectedECFileStatus = dfs.getFileStatus(ecFile);
     FileStatus actualECFileStatus = webHdfs.getFileStatus(ecFile);
-    Assert.assertEquals(expectedECFileStatus.isErasureCoded(),
+    Assertions.assertEquals(expectedECFileStatus.isErasureCoded(),
         actualECFileStatus.isErasureCoded());
     ContractTestUtils.assertErasureCoded(dfs, ecFile);
-    assertTrue(
+    assertTrue(actualECFileStatus.toString().contains("isErasureCoded=true"),
         ecFile + " should have erasure coding set in "
-            + "FileStatus#toString(): " + actualECFileStatus,
-        actualECFileStatus.toString().contains("isErasureCoded=true"));
+            + "FileStatus#toString(): " + actualECFileStatus);
 
     FileStatus expectedNormalDirStatus = dfs.getFileStatus(normalDir);
     FileStatus actualNormalDirStatus = webHdfs.getFileStatus(normalDir);
-    Assert.assertEquals(expectedNormalDirStatus.isErasureCoded(),
+    Assertions.assertEquals(expectedNormalDirStatus.isErasureCoded(),
         actualNormalDirStatus.isErasureCoded());
     ContractTestUtils.assertNotErasureCoded(dfs, normalDir);
     assertTrue(
+        actualNormalDirStatus.toString().contains("isErasureCoded=false"),
         normalDir + " should have erasure coding unset in "
-            + "FileStatus#toString(): " + actualNormalDirStatus,
-        actualNormalDirStatus.toString().contains("isErasureCoded=false"));
+            + "FileStatus#toString(): " + actualNormalDirStatus);
 
     FileStatus expectedNormalFileStatus = dfs.getFileStatus(normalFile);
     FileStatus actualNormalFileStatus = webHdfs.getFileStatus(normalDir);
-    Assert.assertEquals(expectedNormalFileStatus.isErasureCoded(),
+    Assertions.assertEquals(expectedNormalFileStatus.isErasureCoded(),
         actualNormalFileStatus.isErasureCoded());
     ContractTestUtils.assertNotErasureCoded(dfs, normalFile);
     assertTrue(
+        actualNormalFileStatus.toString().contains("isErasureCoded=false"),
         normalFile + " should have erasure coding unset in "
-            + "FileStatus#toString(): " + actualNormalFileStatus,
-        actualNormalFileStatus.toString().contains("isErasureCoded=false"));
+            + "FileStatus#toString(): " + actualNormalFileStatus);
   }
 
   /**
@@ -674,9 +670,9 @@
     // create snapshot without specifying name
     final Path spath = webHdfs.createSnapshot(foo, null);
 
-    Assert.assertTrue(webHdfs.exists(spath));
+    Assertions.assertTrue(webHdfs.exists(spath));
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    Assertions.assertTrue(webHdfs.exists(s1path));
   }
 
   /**
@@ -697,16 +693,16 @@
 
     webHdfs.createSnapshot(foo, "s1");
     final Path spath = webHdfs.createSnapshot(foo, null);
-    Assert.assertTrue(webHdfs.exists(spath));
+    Assertions.assertTrue(webHdfs.exists(spath));
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    Assertions.assertTrue(webHdfs.exists(s1path));
 
     // delete operation snapshot name as null
     try {
       webHdfs.deleteSnapshot(foo, null);
       fail("Expected IllegalArgumentException");
     } catch (RemoteException e) {
-      Assert.assertEquals("Required param snapshotname for "
+      Assertions.assertEquals("Required param snapshotname for "
           + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
     }
 
@@ -741,7 +737,7 @@
     dfs.allowSnapshot(foo);
     webHdfs.createSnapshot(foo, "s1");
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    Assertions.assertTrue(webHdfs.exists(s1path));
 
     Path file3 = new Path(foo, "file3");
     DFSTestUtil.createFile(dfs, file3, 100, (short) 1, 0);
@@ -754,9 +750,9 @@
     SnapshotDiffReport diffReport =
         webHdfs.getSnapshotDiffReport(foo, "s1", "s2");
 
-    Assert.assertEquals("/foo", diffReport.getSnapshotRoot());
-    Assert.assertEquals("s1", diffReport.getFromSnapshot());
-    Assert.assertEquals("s2", diffReport.getLaterSnapshotName());
+    Assertions.assertEquals("/foo", diffReport.getSnapshotRoot());
+    Assertions.assertEquals("s1", diffReport.getFromSnapshot());
+    Assertions.assertEquals("s2", diffReport.getLaterSnapshotName());
     DiffReportEntry entry0 =
         new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes(""));
     DiffReportEntry entry1 =
@@ -767,18 +763,18 @@
         DFSUtil.string2Bytes("file2"), DFSUtil.string2Bytes("file4"));
     DiffReportEntry entry4 =
         new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file3"));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry0));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry1));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry2));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry3));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry4));
-    Assert.assertEquals(diffReport.getDiffList().size(), 5);
+    Assertions.assertTrue(diffReport.getDiffList().contains(entry0));
+    Assertions.assertTrue(diffReport.getDiffList().contains(entry1));
+    Assertions.assertTrue(diffReport.getDiffList().contains(entry2));
+    Assertions.assertTrue(diffReport.getDiffList().contains(entry3));
+    Assertions.assertTrue(diffReport.getDiffList().contains(entry4));
+    Assertions.assertEquals(diffReport.getDiffList().size(), 5);
 
     // Test with fromSnapshot and toSnapshot as null.
     diffReport = webHdfs.getSnapshotDiffReport(foo, null, "s2");
-    Assert.assertEquals(diffReport.getDiffList().size(), 0);
+    Assertions.assertEquals(diffReport.getDiffList().size(), 0);
     diffReport = webHdfs.getSnapshotDiffReport(foo, "s1", null);
-    Assert.assertEquals(diffReport.getDiffList().size(), 5);
+    Assertions.assertEquals(diffReport.getDiffList().size(), 5);
   }
 
   /**
@@ -798,7 +794,7 @@
     dfs.mkdirs(bar);
     SnapshottableDirectoryStatus[] statuses =
         webHdfs.getSnapshottableDirectoryList();
-    Assert.assertNull(statuses);
+    Assertions.assertNull(statuses);
     dfs.allowSnapshot(foo);
     dfs.allowSnapshot(bar);
     Path file0 = new Path(foo, "file0");
@@ -810,37 +806,37 @@
         dfs.getSnapshottableDirListing();
 
     for (int i = 0; i < dfsStatuses.length; i++) {
-      Assert.assertEquals(statuses[i].getSnapshotNumber(),
+      Assertions.assertEquals(statuses[i].getSnapshotNumber(),
           dfsStatuses[i].getSnapshotNumber());
-      Assert.assertEquals(statuses[i].getSnapshotQuota(),
+      Assertions.assertEquals(statuses[i].getSnapshotQuota(),
           dfsStatuses[i].getSnapshotQuota());
-      Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+      Assertions.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
           dfsStatuses[i].getParentFullPath()));
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
           statuses[i].getDirStatus().getChildrenNum());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
           statuses[i].getDirStatus().getModificationTime());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
           statuses[i].getDirStatus().isDir());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
           statuses[i].getDirStatus().getAccessTime());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
           statuses[i].getDirStatus().getPermission());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
           statuses[i].getDirStatus().getOwner());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
           statuses[i].getDirStatus().getGroup());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
           statuses[i].getDirStatus().getPath());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
           statuses[i].getDirStatus().getFileId());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
           statuses[i].getDirStatus().hasAcl());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
           statuses[i].getDirStatus().isEncrypted());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
           statuses[i].getDirStatus().isErasureCoded());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+      Assertions.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
           statuses[i].getDirStatus().isSnapshotEnabled());
     }
   }
@@ -864,37 +860,37 @@
       SnapshotStatus[] dfsStatuses = dfs.getSnapshotListing(foo);
 
       for (int i = 0; i < dfsStatuses.length; i++) {
-        Assert.assertEquals(statuses[i].getSnapshotID(),
+        Assertions.assertEquals(statuses[i].getSnapshotID(),
             dfsStatuses[i].getSnapshotID());
-        Assert.assertEquals(statuses[i].isDeleted(),
+        Assertions.assertEquals(statuses[i].isDeleted(),
             dfsStatuses[i].isDeleted());
-        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+        Assertions.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
             dfsStatuses[i].getParentFullPath()));
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
             statuses[i].getDirStatus().getChildrenNum());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
             statuses[i].getDirStatus().getModificationTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
             statuses[i].getDirStatus().isDir());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
             statuses[i].getDirStatus().getAccessTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
             statuses[i].getDirStatus().getPermission());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
             statuses[i].getDirStatus().getOwner());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
             statuses[i].getDirStatus().getGroup());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
             statuses[i].getDirStatus().getPath());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
             statuses[i].getDirStatus().getFileId());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
             statuses[i].getDirStatus().hasAcl());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
             statuses[i].getDirStatus().isEncrypted());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
             statuses[i].getDirStatus().isErasureCoded());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+        Assertions.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
             statuses[i].getDirStatus().isSnapshotEnabled());
       }
     } finally {
@@ -942,14 +938,14 @@
 
     webHdfs.createSnapshot(foo, "s1");
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    Assertions.assertTrue(webHdfs.exists(s1path));
 
     // rename s1 to s2 with oldsnapshotName as null
     try {
       webHdfs.renameSnapshot(foo, null, "s2");
       fail("Expected IllegalArgumentException");
     } catch (RemoteException e) {
-      Assert.assertEquals("Required param oldsnapshotname for "
+      Assertions.assertEquals("Required param oldsnapshotname for "
           + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
     }
 
@@ -957,7 +953,7 @@
     webHdfs.renameSnapshot(foo, "s1", "s2");
     assertFalse(webHdfs.exists(s1path));
     final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
-    Assert.assertTrue(webHdfs.exists(s2path));
+    Assertions.assertTrue(webHdfs.exists(s2path));
 
     webHdfs.deleteSnapshot(foo, "s2");
     assertFalse(webHdfs.exists(s2path));
@@ -998,7 +994,7 @@
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
     final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
         WebHdfsConstants.WEBHDFS_SCHEME);
-    Assert.assertNull(webHdfs.getDelegationToken(null));
+    Assertions.assertNull(webHdfs.getDelegationToken(null));
   }
 
   @Test
@@ -1011,7 +1007,7 @@
       webHdfs.getDelegationToken(null);
       fail("No exception is thrown.");
     } catch (AccessControlException ace) {
-      Assert.assertTrue(ace.getMessage().startsWith(
+      Assertions.assertTrue(ace.getMessage().startsWith(
           WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
     }
   }
@@ -1037,12 +1033,12 @@
                 new LengthParam((long) LENGTH)));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.setInstanceFollowRedirects(true);
-    Assert.assertEquals(LENGTH, conn.getContentLength());
+    Assertions.assertEquals(LENGTH, conn.getContentLength());
     byte[] subContents = new byte[LENGTH];
     byte[] realContents = new byte[LENGTH];
     System.arraycopy(CONTENTS, OFFSET, subContents, 0, LENGTH);
     IOUtils.readFully(conn.getInputStream(), realContents);
-    Assert.assertArrayEquals(subContents, realContents);
+    Assertions.assertArrayEquals(subContents, realContents);
   }
 
   @Test
@@ -1056,7 +1052,7 @@
     dfs.mkdirs(path);
     dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
     ContentSummary contentSummary = webHdfs.getContentSummary(path);
-    Assert
+    Assertions
         .assertTrue((contentSummary.getTypeQuota(StorageType.DISK) == 100000));
   }
 
@@ -1193,16 +1189,16 @@
       byte[] buf = new byte[1024];
       try {
         in.readFully(1020, buf, 0, 5);
-        Assert.fail("EOF expected");
+        Assertions.fail("EOF expected");
       } catch (EOFException ignored) {}
 
       // mix pread with stateful read
       int length = in.read(buf, 0, 512);
       in.readFully(100, new byte[1024], 0, 100);
       int preadLen = in.read(200, new byte[1024], 0, 200);
-      Assert.assertTrue(preadLen > 0);
+      Assertions.assertTrue(preadLen > 0);
       IOUtils.readFully(in, buf, length, 1024 - length);
-      Assert.assertArrayEquals(content, buf);
+      Assertions.assertArrayEquals(content, buf);
     } finally {
       if (in != null) {
         in.close();
@@ -1254,7 +1250,7 @@
     BlockLocation[] locations = fs.getFileBlockLocations(PATH, OFFSET, LENGTH);
     for (BlockLocation location : locations) {
       StorageType[] storageTypes = location.getStorageTypes();
-      Assert.assertTrue(storageTypes != null && storageTypes.length > 0
+      Assertions.assertTrue(storageTypes != null && storageTypes.length > 0
           && storageTypes[0] == StorageType.DISK);
     }
   }
@@ -1362,19 +1358,19 @@
     for(int i=0; i<locations1.length; i++) {
       BlockLocation location1 = locations1[i];
       BlockLocation location2 = locations2[i];
-      Assert.assertEquals(location1.getLength(),
+      Assertions.assertEquals(location1.getLength(),
           location2.getLength());
-      Assert.assertEquals(location1.getOffset(),
+      Assertions.assertEquals(location1.getOffset(),
           location2.getOffset());
-      Assert.assertArrayEquals(location1.getCachedHosts(),
+      Assertions.assertArrayEquals(location1.getCachedHosts(),
           location2.getCachedHosts());
-      Assert.assertArrayEquals(location1.getHosts(),
+      Assertions.assertArrayEquals(location1.getHosts(),
           location2.getHosts());
-      Assert.assertArrayEquals(location1.getNames(),
+      Assertions.assertArrayEquals(location1.getNames(),
           location2.getNames());
-      Assert.assertArrayEquals(location1.getTopologyPaths(),
+      Assertions.assertArrayEquals(location1.getTopologyPaths(),
           location2.getTopologyPaths());
-      Assert.assertArrayEquals(location1.getStorageTypes(),
+      Assertions.assertArrayEquals(location1.getStorageTypes(),
           location2.getStorageTypes());
     }
   }
@@ -1438,18 +1434,18 @@
 
     // get file status and check that it was written properly.
     final FileStatus s1 = fs.getFileStatus(file1);
-    assertEquals("Write failed for file " + file1, length, s1.getLen());
+    assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
     // Ensure file can be read through WebHdfsInputStream
     FSDataInputStream in = fs.open(file1);
-    assertTrue("Input stream is not an instance of class WebHdfsInputStream",
-        in.getWrappedStream() instanceof WebHdfsInputStream);
+    assertTrue(in.getWrappedStream() instanceof WebHdfsInputStream,
+        "Input stream is not an instance of class WebHdfsInputStream");
     int count = 0;
     for (; in.read() != -1; count++)
       ;
-    assertEquals("Read failed for file " + file1, s1.getLen(), count);
-    assertEquals("Sghould not be able to read beyond end of file", in.read(),
-        -1);
+    assertEquals(s1.getLen(), count, "Read failed for file " + file1);
+    assertEquals(in.read(), -1,
+        "Should not be able to read beyond end of file");
     in.close();
     try {
       in.read();
@@ -1538,9 +1534,9 @@
     } catch (Exception e) {
       assertTrue(e.getMessage().contains(msg));
     }
-    assertEquals(msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
-                + "have called shouldRetry. ",
-        attemptedRetry, shouldAttemptRetry);
+    assertEquals(attemptedRetry, shouldAttemptRetry,
+        msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
+            + "have called shouldRetry. ");
 
     verify(rr, times(numTimesTried)).getResponse((HttpURLConnection) any());
     webIn.close();
@@ -1554,21 +1550,21 @@
     conn.setInstanceFollowRedirects(false);
     String response = IOUtils.toString(conn.getInputStream());
     LOG.info("Response was : " + response);
-    Assert.assertEquals(
-      "Response wasn't " + HttpURLConnection.HTTP_OK,
-      HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    Assertions.assertEquals(
+        HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
     JSONObject responseJson = new JSONObject(response);
-    Assert.assertTrue("Response didn't give us a location. " + response,
-      responseJson.has("Location"));
+    Assertions.assertTrue(responseJson.has("Location"),
+        "Response didn't give us a location. " + response);
 
     //Test that the DN allows CORS on Create
     if(TYPE.equals("CREATE")) {
       URL dnLocation = new URL(responseJson.getString("Location"));
       HttpURLConnection dnConn = (HttpURLConnection) dnLocation.openConnection();
       dnConn.setRequestMethod("OPTIONS");
-      Assert.assertEquals("Datanode url : " + dnLocation + " didn't allow "
-        + "CORS", HttpURLConnection.HTTP_OK, dnConn.getResponseCode());
+      Assertions.assertEquals(HttpURLConnection.HTTP_OK, dnConn.getResponseCode(),
+          "Datanode url : " + dnLocation + " didn't allow CORS");
     }
   }
 
@@ -1720,7 +1716,7 @@
         WebHdfsConstants.WEBHDFS_SCHEME);
 
     // test getAllStoragePolicies
-    Assert.assertTrue(Arrays.equals(dfs.getAllStoragePolicies().toArray(),
+    Assertions.assertTrue(Arrays.equals(dfs.getAllStoragePolicies().toArray(),
         webHdfs.getAllStoragePolicies().toArray()));
 
     // test get/set/unset policies
@@ -1733,12 +1729,12 @@
     BlockStoragePolicySpi dfsPolicy = dfs.getStoragePolicy(path);
     // get policy from webhdfs
     BlockStoragePolicySpi webHdfsPolicy = webHdfs.getStoragePolicy(path);
-    Assert.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
+    Assertions.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
         webHdfsPolicy.getName());
-    Assert.assertEquals(webHdfsPolicy, dfsPolicy);
+    Assertions.assertEquals(webHdfsPolicy, dfsPolicy);
     // unset policy
     webHdfs.unsetStoragePolicy(path);
-    Assert.assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
+    Assertions.assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
   }
 
   @Test
@@ -1755,7 +1751,7 @@
           HdfsConstants.COLD_STORAGE_POLICY_NAME);
       fail("Should throw exception, when storage policy disabled");
     } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains(
+      Assertions.assertTrue(e.getMessage().contains(
           "Failed to set storage policy since"));
     }
   }
@@ -1769,14 +1765,14 @@
       if (policy.getPolicy().getName().equals(ecpolicy)) {
         found = true;
         if (state.equals("disable")) {
-          Assert.assertTrue(policy.isDisabled());
+          Assertions.assertTrue(policy.isDisabled());
         } else if (state.equals("enable")) {
-          Assert.assertTrue(policy.isEnabled());
+          Assertions.assertTrue(policy.isEnabled());
         }
         break;
       }
     }
-    Assert.assertTrue(found);
+    Assertions.assertTrue(found);
   }
 
   // Test For Enable/Disable EC Policy in DFS.
@@ -1925,36 +1921,36 @@
 
   private void compareFsServerDefaults(FsServerDefaults serverDefaults1,
       FsServerDefaults serverDefaults2) throws Exception {
-    Assert.assertEquals("Block size is different",
-        serverDefaults1.getBlockSize(),
-        serverDefaults2.getBlockSize());
-    Assert.assertEquals("Bytes per checksum are different",
-        serverDefaults1.getBytesPerChecksum(),
-        serverDefaults2.getBytesPerChecksum());
-    Assert.assertEquals("Write packet size is different",
-        serverDefaults1.getWritePacketSize(),
-        serverDefaults2.getWritePacketSize());
-    Assert.assertEquals("Default replication is different",
-        serverDefaults1.getReplication(),
-        serverDefaults2.getReplication());
-    Assert.assertEquals("File buffer size are different",
-        serverDefaults1.getFileBufferSize(),
-        serverDefaults2.getFileBufferSize());
-    Assert.assertEquals("Encrypt data transfer key is different",
-        serverDefaults1.getEncryptDataTransfer(),
-        serverDefaults2.getEncryptDataTransfer());
-    Assert.assertEquals("Trash interval is different",
-        serverDefaults1.getTrashInterval(),
-        serverDefaults2.getTrashInterval());
-    Assert.assertEquals("Checksum type is different",
-        serverDefaults1.getChecksumType(),
-        serverDefaults2.getChecksumType());
-    Assert.assertEquals("Key provider uri is different",
-        serverDefaults1.getKeyProviderUri(),
-        serverDefaults2.getKeyProviderUri());
-    Assert.assertEquals("Default storage policy is different",
-        serverDefaults1.getDefaultStoragePolicyId(),
-        serverDefaults2.getDefaultStoragePolicyId());
+    Assertions.assertEquals(serverDefaults1.getBlockSize(),
+        serverDefaults2.getBlockSize(),
+        "Block size is different");
+    Assertions.assertEquals(serverDefaults1.getBytesPerChecksum(),
+        serverDefaults2.getBytesPerChecksum(),
+        "Bytes per checksum are different");
+    Assertions.assertEquals(serverDefaults1.getWritePacketSize(),
+        serverDefaults2.getWritePacketSize(),
+        "Write packet size is different");
+    Assertions.assertEquals(serverDefaults1.getReplication(),
+        serverDefaults2.getReplication(),
+        "Default replication is different");
+    Assertions.assertEquals(serverDefaults1.getFileBufferSize(),
+        serverDefaults2.getFileBufferSize(),
+        "File buffer size are different");
+    Assertions.assertEquals(serverDefaults1.getEncryptDataTransfer(),
+        serverDefaults2.getEncryptDataTransfer(),
+        "Encrypt data transfer key is different");
+    Assertions.assertEquals(serverDefaults1.getTrashInterval(),
+        serverDefaults2.getTrashInterval(),
+        "Trash interval is different");
+    Assertions.assertEquals(serverDefaults1.getChecksumType(),
+        serverDefaults2.getChecksumType(),
+        "Checksum type is different");
+    Assertions.assertEquals(serverDefaults1.getKeyProviderUri(),
+        serverDefaults2.getKeyProviderUri(),
+        "Key provider uri is different");
+    Assertions.assertEquals(serverDefaults1.getDefaultStoragePolicyId(),
+        serverDefaults2.getDefaultStoragePolicyId(),
+        "Default storage policy is different");
   }
 
   /**
@@ -1974,7 +1970,7 @@
         .thenThrow(new UnsupportedOperationException());
     try {
       webfs.getServerDefaults();
-      Assert.fail("should have thrown UnSupportedOperationException.");
+      Assertions.fail("should have thrown UnSupportedOperationException.");
     } catch (UnsupportedOperationException uoe) {
       // Expected exception.
     }
@@ -2008,7 +2004,7 @@
 
     // get file status and check that it was written properly.
     final FileStatus s1 = fs.getFileStatus(file1);
-    assertEquals("Write failed for file " + file1, length, s1.getLen());
+    assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
     FSDataInputStream in = fs.open(file1);
     in.read(); // Connection is made only when the first read() occurs.
@@ -2073,21 +2069,21 @@
     conn.setInstanceFollowRedirects(false);
     String listStatusResponse = IOUtils.toString(conn.getInputStream(),
         StandardCharsets.UTF_8);
-    Assert.assertEquals("Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    Assertions.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
     // Verify that ecPolicy is set in the ListStatus response for ec file
     String ecpolicyForECfile = getECPolicyFromFileStatusJson(
         getFileStatusJson(listStatusResponse, ecFile.getName()));
-    assertEquals("EC policy for ecFile should match the set EC policy",
-        ecpolicyForECfile, ecPolicyName);
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
 
     // Verify that ecPolicy is not set in the ListStatus response for non-ec
     // file
     String ecPolicyForNonECfile = getECPolicyFromFileStatusJson(
         getFileStatusJson(listStatusResponse, nonEcFile.getName()));
-    assertEquals("EC policy for nonEcFile should be null (not set)",
-        ecPolicyForNonECfile, null);
+    assertEquals(ecPolicyForNonECfile, null,
+        "EC policy for nonEcFile should be null (not set)");
 
     // Query webhdfs REST API to get fileStatus for ecFile
     URL getFileStatusUrl = new URL("http", addr.getHostString(), addr.getPort(),
@@ -2099,15 +2095,15 @@
     conn.setInstanceFollowRedirects(false);
     String getFileStatusResponse = IOUtils.toString(conn.getInputStream(),
         StandardCharsets.UTF_8);
-    Assert.assertEquals("Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    Assertions.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
     // Verify that ecPolicy is set in getFileStatus response for ecFile
     JSONObject fileStatusObject = new JSONObject(getFileStatusResponse)
         .getJSONObject("FileStatus");
     ecpolicyForECfile = getECPolicyFromFileStatusJson(fileStatusObject);
-    assertEquals("EC policy for ecFile should match the set EC policy",
-        ecpolicyForECfile, ecPolicyName);
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java
index a285cd3..3fa5ccc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java
@@ -19,16 +19,16 @@
 
 import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests ACL APIs via WebHDFS.
  */
 public class TestWebHDFSAcl extends FSAclBaseTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = WebHdfsTestUtil.createConf();
     startCluster();
@@ -40,7 +40,7 @@
    */
   @Override
   @Test
-  @Ignore
+  @Disabled
   public void testDefaultAclNewSymlinkIntermediate() {
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
index 1fc54d6..9dadf29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
@@ -54,8 +54,8 @@
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.eclipse.jetty.util.ajax.JSON;
 
 import javax.servlet.http.HttpServletResponse;
@@ -87,13 +87,13 @@
       cluster.transitionToActive(0);
 
       final Path dir = new Path("/test");
-      Assert.assertTrue(fs.mkdirs(dir));
+      Assertions.assertTrue(fs.mkdirs(dir));
 
       cluster.shutdownNameNode(0);
       cluster.transitionToActive(1);
 
       final Path dir2 = new Path("/test2");
-      Assert.assertTrue(fs.mkdirs(dir2));
+      Assertions.assertTrue(fs.mkdirs(dir2));
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -174,7 +174,7 @@
       } catch (IOException e) {
         // Mimic the UserProvider class logic (server side) by throwing
         // SecurityException here
-        Assert.assertTrue(e instanceof SecretManager.InvalidToken);
+        Assertions.assertTrue(e instanceof SecretManager.InvalidToken);
         resp = eh.toResponse(new SecurityException(e));
       }
       // The Response (resp) below is what the server will send to client
@@ -197,7 +197,7 @@
       Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
       RemoteException re = JsonUtilClient.toRemoteException(m);
       Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
-      Assert.assertTrue(unwrapped instanceof StandbyException);
+      Assertions.assertTrue(unwrapped instanceof StandbyException);
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -236,7 +236,7 @@
       FSDataInputStream in = fs.open(p);
       byte[] buf = new byte[data.length];
       IOUtils.readFully(in, buf, 0, buf.length);
-      Assert.assertArrayEquals(data, buf);
+      Assertions.assertArrayEquals(data, buf);
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -262,7 +262,7 @@
       DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
 
       fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
-      Assert.assertEquals(2, fs.getResolvedNNAddr().length);
+      Assertions.assertEquals(2, fs.getResolvedNNAddr().length);
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -319,7 +319,7 @@
         while (!resultMap.containsKey("mkdirs")) {
           this.wait();
         }
-        Assert.assertTrue(resultMap.get("mkdirs"));
+        Assertions.assertTrue(resultMap.get("mkdirs"));
       }
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index 722ebad..f2c9d49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.BufferedReader;
 import java.io.FileNotFoundException;
@@ -52,9 +52,9 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private static final Configuration conf = new Configuration();
@@ -76,7 +76,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     //get file system as a non-superuser
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
@@ -148,13 +148,13 @@
       String names2[] = computed[i].getNames();
       Arrays.sort(names1);
       Arrays.sort(names2);
-      Assert.assertArrayEquals("Names differ", names1, names2);
+      Assertions.assertArrayEquals(names1, names2, "Names differ");
       // Check topology
       String topos1[] = expected[i].getTopologyPaths();
       String topos2[] = computed[i].getTopologyPaths();
       Arrays.sort(topos1);
       Arrays.sort(topos2);
-      Assert.assertArrayEquals("Topology differs", topos1, topos2);
+      Assertions.assertArrayEquals(topos1, topos2, "Topology differs");
     }
   }
 
@@ -237,8 +237,8 @@
       in.close();
   
       for (int i = 0; i < buf.length; i++) {
-        assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
-            mydata[i + offset], buf[i]);
+        assertEquals(mydata[i + offset], buf[i],
+            "Position " + i + ", offset=" + offset + ", length=" + len);
       }
     }
 
@@ -252,8 +252,8 @@
       in.close();
   
       for (int i = 0; i < buf.length; i++) {
-        assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
-            mydata[i + offset], buf[i]);
+        assertEquals(mydata[i + offset], buf[i],
+            "Position " + i + ", offset=" + offset + ", length=" + len);
       }
     }
   }
@@ -266,7 +266,7 @@
     final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
     final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
     WebHdfsFileSystem.LOG.info("null url=" + url);
-    Assert.assertTrue(url.toString().contains("v1"));
+    Assertions.assertTrue(url.toString().contains("v1"));
 
     //test root permission
     final FileStatus status = fs.getFileStatus(root);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index a693ac3..244894c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -47,14 +47,14 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
 import org.junit.AssumptionViolatedException;
-import org.junit.Before;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -107,7 +107,7 @@
   @Parameter
   public TimeoutSource timeoutSource;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = WebHdfsTestUtil.createConf();
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
@@ -129,7 +129,7 @@
     failedToConsumeBacklog = false;
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     IOUtils.cleanupWithLogger(
         LOG, clients.toArray(new SocketChannel[clients.size()]));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index cb62288..1873e69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
@@ -57,15 +55,15 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestWebHdfsUrl {
   // NOTE: port is never used 
   final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" + "127.0.0.1:0");
 
-  @Before
+  @BeforeEach
   public void resetUGI() {
     UserGroupInformation.setConfiguration(new Configuration());
   }
@@ -82,7 +80,7 @@
     Path fsPath = new Path(pathName);
     URL encodedPathUrl = webhdfs.toUrl(PutOpParam.Op.CREATE, fsPath);
     // We should get back the original file path after cycling back and decoding
-    Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,
+    Assertions.assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,
         encodedPathUrl.toURI().getPath());
   }
 
@@ -150,8 +148,8 @@
 
     // send user
     URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        getTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(getTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
@@ -163,8 +161,8 @@
     // send user
     URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        renewTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(renewTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
@@ -177,8 +175,8 @@
     // send token
     URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -202,8 +200,8 @@
     // send user
     cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -244,8 +242,8 @@
 
     // send real+effective
     URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        getTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(getTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
@@ -258,8 +256,8 @@
     // send real+effective
     URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        renewTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(renewTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
@@ -272,8 +270,8 @@
     // send token
     URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -298,8 +296,8 @@
     // send real+effective
     cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -421,7 +419,7 @@
 
       //get file status and check that it was written properly.
       final FileStatus s1 = fs.getFileStatus(file1);
-      assertEquals("Write failed for file " + file1, length, s1.getLen());
+      assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
       boolean found = false;
       RemoteIterator<LocatedFileStatus> statusRemoteIterator =
@@ -434,7 +432,7 @@
           found = true;
         }
       }
-      assertFalse("Could not find file with special character", !found);
+      assertFalse(!found, "Could not find file with special character");
     } finally {
       cluster.shutdown();
     }
@@ -472,7 +470,7 @@
 
       //get file status and check that it was written properly.
       final FileStatus s1 = fs.getFileStatus(file1);
-      assertEquals("Write failed for file " + file1, length, s1.getLen());
+      assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
       boolean found = false;
       RemoteIterator<LocatedFileStatus> statusRemoteIterator =
@@ -485,7 +483,7 @@
           found = true;
         }
       }
-      assertFalse("Could not find file with special character", !found);
+      assertFalse(!found, "Could not find file with special character");
     } finally {
       cluster.shutdown();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java
index 106f368..c53bd24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java
@@ -38,10 +38,10 @@
 import org.apache.hadoop.http.FilterInitializer;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestWebHdfsWithAuthenticationFilter {
   private static boolean authorized = false;
@@ -83,7 +83,7 @@
   private static MiniDFSCluster cluster;
   private static FileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws IOException {
     conf = new Configuration();
     conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
@@ -96,7 +96,7 @@
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -112,7 +112,7 @@
     authorized = false;
     try {
       fs.getFileStatus(new Path("/"));
-      Assert.fail("The filter fails to block the request");
+      Assertions.fail("The filter fails to block the request");
     } catch (IOException e) {
     }
     authorized = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
index 119dc50..8ec9370 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
@@ -33,10 +33,10 @@
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test WebHDFS with multiple NameNodes
@@ -55,7 +55,7 @@
   private static MiniDFSCluster cluster;
   private static WebHdfsFileSystem[] webhdfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setupTest() {
     setLogLevel();
     try {
@@ -84,7 +84,7 @@
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -126,14 +126,14 @@
     for(int i = 0; i < webhdfs.length; i++) {
       //check file length
       final long expected = writeStrings[i].length();
-      Assert.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
+      Assertions.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
     }
 
     //test read: check file content for each namenode
     for(int i = 0; i < webhdfs.length; i++) {
       final FSDataInputStream in = webhdfs[i].open(p);
       for(int c, j = 0; (c = in.read()) != -1; j++) {
-        Assert.assertEquals(writeStrings[i].charAt(j), c);
+        Assertions.assertEquals(writeStrings[i].charAt(j), c);
       }
       in.close();
     }
@@ -148,7 +148,7 @@
     for(int i = 0; i < webhdfs.length; i++) {
       //check file length
       final long expected = writeStrings[i].length() + appendStrings[i].length();
-      Assert.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
+      Assertions.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
     }
 
     //test read: check file content for each namenode
@@ -159,8 +159,8 @@
         b.append((char)c);
       }
       final int wlen = writeStrings[i].length();
-      Assert.assertEquals(writeStrings[i], b.substring(0, wlen));
-      Assert.assertEquals(appendStrings[i], b.substring(wlen));
+      Assertions.assertEquals(writeStrings[i], b.substring(0, wlen));
+      Assertions.assertEquals(appendStrings[i], b.substring(wlen));
       in.close();
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java
index a1c27f5..1936ef8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java
@@ -20,8 +20,8 @@
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -36,10 +36,10 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -87,7 +87,7 @@
         {false, false, true}});
   }
 
-  @Before
+  @BeforeEach
   public void before() throws Exception {
     Configuration nnConf = new Configuration();
     nnConf.setBoolean(DFS_WEBHDFS_REST_CSRF_ENABLED_KEY, nnRestCsrf);
@@ -113,7 +113,7 @@
         NetUtils.getHostPortString(addr)), clientConf);
   }
 
-  @After
+  @AfterEach
   public void after() {
     IOUtils.closeStream(webhdfs);
     IOUtils.closeStream(fs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index 95078a5..a60fc5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -40,8 +38,8 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 public class TestParam {
   public static final Logger LOG = LoggerFactory.getLogger(TestParam.class);
@@ -51,13 +49,13 @@
   @Test
   public void testAccessTimeParam() {
     final AccessTimeParam p = new AccessTimeParam(AccessTimeParam.DEFAULT);
-    Assert.assertEquals(-1L, p.getValue().longValue());
+    Assertions.assertEquals(-1L, p.getValue().longValue());
 
     new AccessTimeParam(-1L);
 
     try {
       new AccessTimeParam(-2L);
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -66,8 +64,8 @@
   @Test
   public void testBlockSizeParam() {
     final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
-    Assert.assertEquals(
+    Assertions.assertEquals(null, p.getValue());
+    Assertions.assertEquals(
         conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
             DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
         p.getValue(conf));
@@ -76,7 +74,7 @@
 
     try {
       new BlockSizeParam(0L);
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -85,8 +83,8 @@
   @Test
   public void testBufferSizeParam() {
     final BufferSizeParam p = new BufferSizeParam(BufferSizeParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
-    Assert.assertEquals(
+    Assertions.assertEquals(null, p.getValue());
+    Assertions.assertEquals(
         conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
             CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
         p.getValue(conf));
@@ -95,7 +93,7 @@
 
     try {
       new BufferSizeParam(0);
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -104,19 +102,19 @@
   @Test
   public void testDelegationParam() {
     final DelegationParam p = new DelegationParam(DelegationParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
   }
 
   @Test
   public void testDestinationParam() {
     final DestinationParam p = new DestinationParam(DestinationParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
 
     new DestinationParam("/abc");
 
     try {
       new DestinationParam("abc");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -125,19 +123,19 @@
   @Test
   public void testGroupParam() {
     final GroupParam p = new GroupParam(GroupParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
   }
 
   @Test
   public void testModificationTimeParam() {
     final ModificationTimeParam p = new ModificationTimeParam(ModificationTimeParam.DEFAULT);
-    Assert.assertEquals(-1L, p.getValue().longValue());
+    Assertions.assertEquals(-1L, p.getValue().longValue());
 
     new ModificationTimeParam(-1L);
 
     try {
       new ModificationTimeParam(-2L);
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -146,13 +144,13 @@
   @Test
   public void testOverwriteParam() {
     final OverwriteParam p = new OverwriteParam(OverwriteParam.DEFAULT);
-    Assert.assertEquals(false, p.getValue());
+    Assertions.assertEquals(false, p.getValue());
 
     new OverwriteParam("trUe");
 
     try {
       new OverwriteParam("abc");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -161,20 +159,20 @@
   @Test
   public void testOwnerParam() {
     final OwnerParam p = new OwnerParam(OwnerParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
   }
 
   @Test
   public void testPermissionParam() {
     final PermissionParam p = new PermissionParam(PermissionParam.DEFAULT);
-    Assert.assertEquals(new FsPermission((short)0755), p.getDirFsPermission());
-    Assert.assertEquals(new FsPermission((short)0644), p.getFileFsPermission());
+    Assertions.assertEquals(new FsPermission((short)0755), p.getDirFsPermission());
+    Assertions.assertEquals(new FsPermission((short)0644), p.getFileFsPermission());
 
     new PermissionParam("0");
 
     try {
       new PermissionParam("-1");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -183,21 +181,21 @@
 
     try {
       new PermissionParam("2000");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new PermissionParam("8");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new PermissionParam("abc");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -206,13 +204,13 @@
   @Test
   public void testRecursiveParam() {
     final RecursiveParam p = new RecursiveParam(RecursiveParam.DEFAULT);
-    Assert.assertEquals(false, p.getValue());
+    Assertions.assertEquals(false, p.getValue());
 
     new RecursiveParam("falSe");
 
     try {
       new RecursiveParam("abc");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -221,14 +219,14 @@
   @Test
   public void testRenewerParam() {
     final RenewerParam p = new RenewerParam(RenewerParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
   }
 
   @Test
   public void testReplicationParam() {
     final ReplicationParam p = new ReplicationParam(ReplicationParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
-    Assert.assertEquals(
+    Assertions.assertEquals(null, p.getValue());
+    Assertions.assertEquals(
         (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
             DFSConfigKeys.DFS_REPLICATION_DEFAULT),
         p.getValue(conf));
@@ -237,7 +235,7 @@
 
     try {
       new ReplicationParam((short)0);
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -250,7 +248,7 @@
     Param<?, ?> equalParam = new RenewerParam("renewer=equal");
     final String expected = "&renewer=renewer%3Dequal&token=token%26ampersand";
     final String actual = Param.toSortedString(sep, equalParam, ampParam);
-    Assert.assertEquals(expected, actual);
+    Assertions.assertEquals(expected, actual);
   }
 
   @Test
@@ -293,7 +291,7 @@
 
       final String expected = StringUtils.join(",", Arrays.asList(sub));
       final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
-      Assert.assertEquals(expected, computed.getValue());
+      Assertions.assertEquals(expected, computed.getValue());
     }
   }
 
@@ -319,13 +317,13 @@
     List<AclEntry> setAclList =
         AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
             true);
-    Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
+    Assertions.assertEquals(setAclList.toString(), p.getAclPermission(true)
         .toString());
 
     new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
     try {
       new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
-      Assert.fail();
+      Assertions.fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -335,21 +333,21 @@
 
     try {
       new AclPermissionParam("user:r-,group:rwx,other:rw-");
-      Assert.fail();
+      Assertions.fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
-      Assert.fail();
+      Assertions.fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
-      Assert.fail();
+      Assertions.fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -375,12 +373,12 @@
       String numericUserSpec = "user:110201:rwx";
       AclPermissionParam aclNumericUserParam =
           new AclPermissionParam(numericUserSpec);
-      Assert.assertEquals(numericUserSpec, aclNumericUserParam.getValue());
+      Assertions.assertEquals(numericUserSpec, aclNumericUserParam.getValue());
 
       String oddGroupSpec = "group:foo@bar:rwx";
       AclPermissionParam aclGroupWithDomainParam =
           new AclPermissionParam(oddGroupSpec);
-      Assert.assertEquals(oddGroupSpec, aclGroupWithDomainParam.getValue());
+      Assertions.assertEquals(oddGroupSpec, aclGroupWithDomainParam.getValue());
 
     } finally {
       // Revert back to the default rules for remainder of tests
@@ -392,22 +390,22 @@
   @Test
   public void testXAttrNameParam() {
     final XAttrNameParam p = new XAttrNameParam("user.a1");
-    Assert.assertEquals(p.getXAttrName(), "user.a1");
+    Assertions.assertEquals(p.getXAttrName(), "user.a1");
   }
   
   @Test
   public void testXAttrValueParam() throws IOException {
     final XAttrValueParam p = new XAttrValueParam("0x313233");
-    Assert.assertArrayEquals(p.getXAttrValue(), 
+    Assertions.assertArrayEquals(p.getXAttrValue(), 
         XAttrCodec.decodeValue("0x313233"));
   }
   
   @Test
   public void testXAttrEncodingParam() {
     final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
-    Assert.assertEquals(p.getEncoding(), XAttrCodec.BASE64);
+    Assertions.assertEquals(p.getEncoding(), XAttrCodec.BASE64);
     final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
-    Assert.assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
+    Assertions.assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
   }
   
   @Test
@@ -415,9 +413,9 @@
     EnumSet<XAttrSetFlag> flag = EnumSet.of(
         XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
     final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
-    Assert.assertEquals(p.getFlag(), flag);
+    Assertions.assertEquals(p.getFlag(), flag);
     final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
-    Assert.assertEquals(p1.getFlag(), flag);
+    Assertions.assertEquals(p1.getFlag(), flag);
   }
   
   @Test
@@ -426,7 +424,7 @@
         Options.Rename.OVERWRITE, Options.Rename.NONE);
     final RenameOptionSetParam p1 = new RenameOptionSetParam(
         p.getValueString());
-    Assert.assertEquals(p1.getValue(), EnumSet.of(
+    Assertions.assertEquals(p1.getValue(), EnumSet.of(
         Options.Rename.OVERWRITE, Options.Rename.NONE));
   }
 
@@ -434,8 +432,8 @@
   public void testSnapshotNameParam() {
     final OldSnapshotNameParam s1 = new OldSnapshotNameParam("s1");
     final SnapshotNameParam s2 = new SnapshotNameParam("s2");
-    Assert.assertEquals("s1", s1.getValue());
-    Assert.assertEquals("s2", s2.getValue());
+    Assertions.assertEquals("s1", s1.getValue());
+    Assertions.assertEquals("s2", s2.getValue());
   }
 
   @Test
@@ -451,42 +449,42 @@
 
     try {
       new FsActionParam("rw");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("qwx");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("qrwx");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("rwxx");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("xwr");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("r-w");
-      Assert.fail();
+      Assertions.fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -496,15 +494,15 @@
   public void testStartAfterParam() throws Exception {
     String s = "/helloWorld";
     StartAfterParam param = new StartAfterParam(s);
-    Assert.assertEquals(s, param.getValue());
+    Assertions.assertEquals(s, param.getValue());
   }
 
   @Test
   public void testStoragePolicyParam() {
     StoragePolicyParam p = new StoragePolicyParam(StoragePolicyParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
     p = new StoragePolicyParam("COLD");
-    Assert.assertEquals("COLD", p.getValue());
+    Assertions.assertEquals("COLD", p.getValue());
   }
 
   @Test
@@ -537,16 +535,16 @@
   @Test
   public void testECPolicyParam() {
     ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    Assertions.assertEquals(null, p.getValue());
     p = new ECPolicyParam("RS-6-3-1024k");
-    Assert.assertEquals("RS-6-3-1024k", p.getValue());
+    Assertions.assertEquals("RS-6-3-1024k", p.getValue());
   }
 
   @Test
   public void testHttpOpParams() {
     try {
       new PostOpParam("TEST");
-      Assert
+      Assertions
           .fail("Construct the PostOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
@@ -555,7 +553,7 @@
     }
     try {
       new PutOpParam("TEST");
-      Assert
+      Assertions
           .fail("Construct the PutOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
@@ -564,7 +562,7 @@
     }
     try {
       new DeleteOpParam("TEST");
-      Assert
+      Assertions
           .fail("Construct the DeleteOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
@@ -573,7 +571,7 @@
     }
     try {
       new GetOpParam("TEST");
-      Assert
+      Assertions
           .fail("Construct the GetOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java
index 2a76604..c9e12e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java
@@ -29,12 +29,11 @@
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.sink.RollingFileSystemSinkTestBase.MyMetrics1;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Test the {@link RollingFileSystemSink} class in the context of HDFS.
@@ -51,7 +50,7 @@
    *
    * @throws IOException thrown if cluster creation fails
    */
-  @Before
+  @BeforeEach
   public void setupHdfs() throws IOException {
     Configuration conf = new Configuration();
 
@@ -66,7 +65,7 @@
   /**
    * Stop the {@link MiniDFSCluster}.
    */
-  @After
+  @AfterEach
   public void shutdownHdfs() {
     if (cluster != null) {
       cluster.shutdown();
@@ -156,8 +155,8 @@
 
     ms.publishMetricsNow(); // publish the metrics
 
-    assertTrue("No exception was generated while writing metrics "
-        + "even though HDFS was unavailable", MockSink.errored);
+    assertTrue(MockSink.errored, "No exception was generated while writing "
+        + "metrics even though HDFS was unavailable");
 
     try {
       ms.stop();
@@ -187,8 +186,8 @@
     try {
       ms.stop();
 
-      assertTrue("No exception was generated while stopping sink "
-          + "even though HDFS was unavailable", MockSink.errored);
+      assertTrue(MockSink.errored, "No exception was generated while stopping "
+          + "sink even though HDFS was unavailable");
     } catch (MetricsException ex) {
       // Expected
     } finally {
@@ -215,9 +214,9 @@
 
     ms.publishMetricsNow(); // publish the metrics
 
-    assertFalse("An exception was generated writing metrics "
-        + "while HDFS was unavailable, even though the sink is set to "
-        + "ignore errors", MockSink.errored);
+    assertFalse(MockSink.errored, "An exception was generated writing metrics "
+        + "while HDFS was unavailable, even though the sink is set to "
+        + "ignore errors");
 
     try {
       ms.stop();
@@ -247,9 +246,9 @@
     try {
       ms.stop();
 
-      assertFalse("An exception was generated stopping sink "
-          + "while HDFS was unavailable, even though the sink is set to "
-          + "ignore errors", MockSink.errored);
+      assertFalse(MockSink.errored, "An exception was generated stopping sink "
+          + "while HDFS was unavailable, even though the sink is set to "
+          + "ignore errors");
     } finally {
       ms.shutdown();
     }
@@ -299,10 +298,10 @@
           findMostRecentLogFile(fs, new Path(currentDir, getLogFilename()));
       FileStatus status = fs.getFileStatus(currentFile);
 
-      // Each metrics record is 118+ bytes, depending on hostname
-      assertTrue("The flusher thread didn't flush the log contents. Expected "
-          + "at least 236 bytes in the log file, but got " + status.getLen(),
-          status.getLen() >= 236);
+      // Each metrics record is 118+ bytes, depending on hostname
+      assertTrue(status.getLen() >= 236,
+          "The flusher thread didn't flush the log contents. Expected "
+          + "at least 236 bytes in the log file, but got " + status.getLen());
     } finally {
       RollingFileSystemSink.forceFlush = false;
 
@@ -326,9 +325,9 @@
     MockSink.errored = false;
     initMetricsSystem(path, true, false);
 
-    assertTrue("The sink was not initialized as expected",
-        MockSink.initialized);
-    assertFalse("The sink threw an unexpected error on initialization",
-        MockSink.errored);
+    assertTrue(MockSink.initialized,
+        "The sink was not initialized as expected");
+    assertFalse(MockSink.errored,
+        "The sink threw an unexpected error on initialization");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java
index 906950b..bcb4701 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java
@@ -49,12 +49,9 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import static org.junit.Assert.assertTrue;
+import org.junit.jupiter.api.*;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test the {@link RollingFileSystemSink} class in the context of HDFS with
@@ -77,7 +74,7 @@
    *
    * @throws Exception thrown if the KDC setup fails
    */
-  @BeforeClass
+  @BeforeAll
   public static void initKdc() throws Exception {
     Properties kdcConf = MiniKdc.createConf();
     kdc = new MiniKdc(kdcConf, ROOT_TEST_DIR);
@@ -101,7 +98,7 @@
    *
    * @throws Exception thrown if the cluster setup fails
    */
-  @Before
+  @BeforeEach
   public void initCluster() throws Exception {
     HdfsConfiguration conf = createSecureConfig("authentication,privacy");
 
@@ -117,7 +114,7 @@
   /**
    * Stop the mini-DFS cluster.
    */
-  @After
+  @AfterEach
   public void stopCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -132,7 +129,7 @@
   /**
    * Stop the mini-KDC.
    */
-  @AfterClass
+  @AfterAll
   public static void shutdownKdc() {
     if (kdc != null) {
       kdc.stop();
@@ -175,9 +172,9 @@
 
     initMetricsSystem(path, true, false);
 
-    assertTrue("No exception was generated initializing the sink against a "
-        + "secure cluster even though the principal and keytab properties "
-        + "were missing", MockSink.errored);
+    assertTrue(MockSink.errored, "No exception was generated initializing the "
+        + "sink against a secure cluster even though the principal and keytab "
+        + "properties were missing");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
index 5758fe79..d5ce9bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
@@ -18,10 +18,7 @@
 
 package org.apache.hadoop.net;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.Collection;
 import java.util.HashMap;
@@ -40,10 +37,10 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -59,7 +56,7 @@
   @Rule
   public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS);
 
-  @Before
+  @BeforeEach
   public void setupDatanodes() {
     dataNodes = new DatanodeDescriptor[] {
         DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
@@ -311,7 +308,7 @@
         }
       }
     }
-    assertTrue("Expected to find a different first location", foundRandom);
+    assertTrue(foundRandom, "Expected to find a different first location");
 
     // Array of just remote nodes
     // Expect random first node
@@ -330,7 +327,7 @@
         }
       }
     }
-    assertTrue("Expected to find a different first location", foundRandom);
+    assertTrue(foundRandom, "Expected to find a different first location");
 
     //Reader is not a datanode, but is in one of the datanode's rack.
     testNodes[0] = dataNodes[0];
@@ -454,18 +451,18 @@
     excludedNodes.add(dataNodes[18]);
     Map<Node, Integer> frequency = pickNodesAtRandom(100, scope, excludedNodes);
 
-    assertEquals("dn[3] should be excluded", 0,
-        frequency.get(dataNodes[3]).intValue());
-    assertEquals("dn[5] should be exclude18d", 0,
-        frequency.get(dataNodes[5]).intValue());
-    assertEquals("dn[7] should be excluded", 0,
-        frequency.get(dataNodes[7]).intValue());
-    assertEquals("dn[9] should be excluded", 0,
-        frequency.get(dataNodes[9]).intValue());
-    assertEquals("dn[13] should be excluded", 0,
-        frequency.get(dataNodes[13]).intValue());
-    assertEquals("dn[18] should be excluded", 0,
-        frequency.get(dataNodes[18]).intValue());
+    assertEquals(0, frequency.get(dataNodes[3]).intValue(),
+        "dn[3] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[5]).intValue(),
+        "dn[5] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[7]).intValue(),
+        "dn[7] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[9]).intValue(),
+        "dn[9] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[13]).intValue(),
+        "dn[13] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[18]).intValue(),
+        "dn[18] should be excluded");
     for (Node key : dataNodes) {
       if (excludedNodes.contains(key)) {
         continue;
@@ -506,14 +503,14 @@
       cluster.waitActive();
       
       NamenodeProtocols nn = cluster.getNameNodeRpc();
-      Assert.assertNotNull(nn);
+      Assertions.assertNotNull(nn);
       
       // Wait for one DataNode to register.
       // The other DataNode will not be able to register up because of the rack mismatch.
       DatanodeInfo[] info;
       while (true) {
         info = nn.getDatanodeReport(DatanodeReportType.LIVE);
-        Assert.assertFalse(info.length == 2);
+        Assertions.assertFalse(info.length == 2);
         if (info.length == 1) {
           break;
         }
@@ -543,7 +540,7 @@
         }
         Thread.sleep(1000);
       }
-      Assert.assertEquals(info[0].getNetworkLocation(),
+      Assertions.assertEquals(info[0].getNetworkLocation(),
                           info[1].getNetworkLocation());
     } finally {
       if (cluster != null) {
@@ -593,10 +590,10 @@
       final Node n = dataNodes[i];
       LOG.info("Verifying node {}", n);
       if (excludedNodes.contains(n)) {
-        assertEquals(n + " should not have been chosen.", 0,
-            (int) frequency.get(n));
+        assertEquals(0, (int) frequency.get(n),
+            n + " should not have been chosen.");
       } else {
-        assertTrue(n + " should have been chosen", frequency.get(n) > 0);
+        assertTrue(frequency.get(n) > 0, n + " should have been chosen");
       }
     }
   }
@@ -610,9 +607,9 @@
     Map<Node, Integer> frequency = pickNodesAtRandom(200, scope, null);
     LOG.info("No node is excluded.");
     for (int i = 0; i < 5; ++i) {
-      // all nodes should be more than zero
-      assertTrue(dataNodes[i] + " should have been chosen.",
-          frequency.get(dataNodes[i]) > 0);
+      // all nodes should be more than zero
+      assertTrue(frequency.get(dataNodes[i]) > 0,
+          dataNodes[i] + " should have been chosen.");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 188476f..8ede951 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.security;
 
 import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -44,7 +41,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /** Unit tests for permission */
 public class TestPermission {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
index ad5b86c..b41cede 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
@@ -21,10 +21,7 @@
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -46,11 +43,11 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestPermissionSymlinks {
 
@@ -70,7 +67,7 @@
   private static FileSystem fs;
   private static FileSystemTestWrapper wrapper;
   
-  @BeforeClass
+  @BeforeAll
   public static void beforeClassSetUp() throws Exception {
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -81,7 +78,7 @@
     wrapper = new FileSystemTestWrapper(fs);
   }
 
-  @AfterClass
+  @AfterAll
   public static void afterClassTearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -91,7 +88,7 @@
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Create initial test files
     fs.mkdirs(linkParent);
@@ -100,7 +97,7 @@
     wrapper.createSymlink(target, link, false);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     // Wipe out everything
     fs.delete(linkParent, true);
@@ -173,10 +170,8 @@
       }
     });
     // Make sure only the link was deleted
-    assertTrue("Target should not have been deleted!",
-        wrapper.exists(target));
-    assertFalse("Link should have been deleted!",
-        wrapper.exists(link));
+    assertTrue(wrapper.exists(target), "Target should not have been deleted!");
+    assertFalse(wrapper.exists(link), "Link should have been deleted!");
   }
 
   @Test(timeout = 5000)
@@ -235,11 +230,11 @@
       public Object run() throws IOException {
         FileContext myfc = FileContext.getFileContext(conf);
         FileStatus stat = myfc.getFileLinkStatus(link);
-        assertEquals("Expected link's FileStatus path to match link!",
-            link.makeQualified(fs.getUri(), fs.getWorkingDirectory()), stat.getPath());
+        assertEquals(link.makeQualified(fs.getUri(), fs.getWorkingDirectory()),
+            stat.getPath(), "Expected link's FileStatus path to match link!");
         Path linkTarget = myfc.getLinkTarget(link);
-        assertEquals("Expected link's target to match target!",
-            target, linkTarget);
+        assertEquals(target, linkTarget,
+            "Expected link's target to match target!");
         return null;
       }
     });
@@ -277,12 +272,12 @@
         Path newlink = new Path(linkParent, "newlink");
         myfc.rename(link, newlink, Rename.NONE);
         Path linkTarget = myfc.getLinkTarget(newlink);
-        assertEquals("Expected link's target to match target!",
-            target, linkTarget);
+        assertEquals(target, linkTarget,
+            "Expected link's target to match target!");
         return null;
       }
     });
-    assertTrue("Expected target to exist", wrapper.exists(target));
+    assertTrue(wrapper.exists(target), "Expected target to exist");
   }
 
   @Test(timeout = 5000)
@@ -354,12 +349,12 @@
         Path newlink = new Path(linkParent, "newlink");
         myfs.rename(link, newlink);
         Path linkTarget = myfs.getLinkTarget(newlink);
-        assertEquals("Expected link's target to match target!",
-            target, linkTarget);
+        assertEquals(target, linkTarget,
+            "Expected link's target to match target!");
         return null;
       }
     });
-    assertTrue("Expected target to exist", wrapper.exists(target));
+    assertTrue(wrapper.exists(target), "Expected target to exist");
   }
 
   @Test(timeout = 5000)
@@ -427,7 +422,7 @@
     } catch (AccessControlException ace) {
       // expected
       String message = ace.getMessage();
-      assertTrue(message, message.contains("is not a directory"));
+      assertTrue(message.contains("is not a directory"), message);
       assertTrue(message.contains(target.toString()));
       assertFalse(message.contains(badPath.toString()));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index d410d3b..fb64647 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.security;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -51,9 +49,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 public class TestRefreshUserMappings {
@@ -99,7 +97,7 @@
     }
   }
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     config = new Configuration();
     config.setClass("hadoop.security.group.mapping",
@@ -115,7 +113,7 @@
     GenericTestUtils.setLogLevel(Groups.LOG, Level.DEBUG);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if(cluster!=null) {
       cluster.shutdown();
@@ -143,7 +141,7 @@
     List<String> g2 = groups.getGroups(user);
     LOG.debug(g2.toString());
     for(int i=0; i<g2.size(); i++) {
-      assertEquals("Should be same group ", g1.get(i), g2.get(i));
+      assertEquals(g1.get(i), g2.get(i), "Should be same group ");
     }
 
     // Test refresh command
@@ -152,8 +150,8 @@
     List<String> g3 = groups.getGroups(user);
     LOG.debug(g3.toString());
     for(int i=0; i<g3.size(); i++) {
-      assertFalse("Should be different group: "
-              + g1.get(i) + " and " + g3.get(i), g1.get(i).equals(g3.get(i)));
+      assertFalse(g1.get(i).equals(g3.get(i)), "Should be different group: "
+          + g1.get(i) + " and " + g3.get(i));
     }
 
     // Test timeout
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index 2fe6102..c63750ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -20,8 +20,8 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -46,9 +46,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.JMXGet;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 /**
@@ -65,7 +65,7 @@
   static final int blockSize = 4096;
   static final int fileSize = 8192;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
   }
@@ -73,7 +73,7 @@
   /**
    * clean up
    */
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       if (cluster.isClusterUp()) {
@@ -105,14 +105,14 @@
     String serviceName = "NameNode";
     jmx.setService(serviceName);
     jmx.init(); // default lists namenode mbeans only
-    assertTrue("error printAllValues", checkPrintAllValues(jmx));
+    assertTrue(checkPrintAllValues(jmx), "error printAllValues");
 
     //get some data from different source
     try {
       DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
     } catch (TimeoutException e) {
-    assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, "NumLiveDataNodes"),numDatanodes, Integer.parseInt(
-        jmx.getValue("NumLiveDataNodes")));
+      assertEquals(numDatanodes, Integer.parseInt(jmx.getValue("NumLiveDataNodes")),
+          String.format(WRONG_METRIC_VALUE_ERROR_MSG, "NumLiveDataNodes"));
     }
     assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
                 getMetrics("FSNamesystem"));
@@ -121,7 +121,7 @@
     MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
     ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
     Set<ObjectName> names = mbsc.queryNames(query, null);
-    assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
+    assertTrue(names.isEmpty(), "No beans should be registered for " + serviceName);
   }
   
   private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
@@ -166,13 +166,13 @@
     try {
       DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
     } catch (TimeoutException e) {
-      assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, "BytesWritten"), fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+      assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")), String.format(WRONG_METRIC_VALUE_ERROR_MSG, "BytesWritten"));
     }
 
     cluster.shutdown();
     MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
     ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
     Set<ObjectName> names = mbsc.queryNames(query, null);
-    assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
+    assertTrue(names.isEmpty(), "No beans should be registered for " + serviceName);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
index a814035e..6e0d533 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
@@ -18,7 +18,8 @@
 
 package org.apache.hadoop.tools;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PipedInputStream;
@@ -30,8 +31,8 @@
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.thirdparty.com.google.common.io.ByteStreams;
 
@@ -41,7 +42,7 @@
   private final static String INVALID_OPTION = "-invalidOption";
   private static final String[] OPTIONS = new String[2];
 
-  @BeforeClass
+  @BeforeAll
   public static void before() {
     ExitUtil.disableSystemExit();
     OPTIONS[1] = INVALID_OPTION;