Revert "HADOOP-18207. Introduce hadoop-logging module (#5503)"

This reverts commit 03a499821c9676da0896ca864074dfb8fbdefd6e.
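
The net effect, visible throughout the diff below, is that hadoop-common and
friends stop going through the reflection-based HadoopLoggerUtils bridge (and
the hadoop-logging test-jar's LogCapturer) and return to using the log4j 1.x
API directly, with LogCapturer restored inside GenericTestUtils. A minimal
sketch of the two call styles being swapped (the logger name is illustrative):

    // hadoop-logging bridge, removed by this revert
    HadoopLoggerUtils.setLogLevel("org.example.Service", "DEBUG");

    // direct log4j 1.x calls, restored by this revert
    org.apache.log4j.LogManager.getLogger("org.example.Service")
        .setLevel(org.apache.log4j.Level.DEBUG);
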
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 9a060f7..4deda43 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -46,6 +46,16 @@
       <artifactId>slf4j-api</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 4cdd600..433a615 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -82,14 +82,14 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
index e18982d..f9c922c 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
@@ -15,7 +15,8 @@
 
 import java.util.Random;
 
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -29,8 +30,9 @@
   private final int timeout = 500;
   private final long rolloverFrequency = timeout / 2;
 
-  static {
-    HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
+  {
+    LogManager.getLogger(
+        RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
   }
 
   @Test
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index d81d1eb..628342e 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -19,7 +19,8 @@
 import javax.servlet.ServletContext;
 
 import org.apache.curator.test.TestingServer;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -38,8 +39,9 @@
   private final int timeout = 100;
   private final long rolloverFrequency = timeout / 2;
 
-  static {
-    HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
+  {
+    LogManager.getLogger(
+        RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
   }
 
   @Before
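
Note: both secret-provider tests above restore the level setting as an
instance initializer rather than the static block used by the hadoop-logging
variant; under JUnit 4 (which instantiates the test class per test method)
that means the code runs per test rather than once at class load. A small
sketch of the Java semantics, with illustrative output:

    class Example {
      static { System.out.println("runs once, when the class is loaded"); }
      { System.out.println("runs each time new Example() executes"); }
    }
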
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 426f7a4..a9e15d0 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -419,16 +419,6 @@
       <artifactId>lz4-java</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 0866651..b4eec1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -299,7 +299,7 @@
 yarn.ewma.cleanupInterval=300
 yarn.ewma.messageAgeLimitSeconds=86400
 yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
 log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index cf090ee..3287959 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
@@ -51,6 +50,8 @@
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 
 /**
  * Change log level in runtime.
@@ -348,7 +349,7 @@
         }
 
         if (GenericsUtil.isLog4jLogger(logName)) {
-          process(logName, level, out);
+          process(Logger.getLogger(logName), level, out);
         } else {
           out.println("Sorry, setting log level is only supported for log4j loggers.<br />");
         }
@@ -367,17 +368,19 @@
         + "<input type='submit' value='Set Log Level' />"
         + "</form>";
 
-    private static void process(String log, String level, PrintWriter out) {
+    private static void process(Logger log, String level,
+        PrintWriter out) throws IOException {
       if (level != null) {
-        try {
-          HadoopLoggerUtils.setLogLevel(log, level);
-          out.println(MARKER + "Setting Level to " + level + " ...<br />");
-        } catch (IllegalArgumentException e) {
+        if (!level.equalsIgnoreCase(Level.toLevel(level)
+            .toString())) {
           out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
+        } else {
+          log.setLevel(Level.toLevel(level));
+          out.println(MARKER + "Setting Level to " + level + " ...<br />");
         }
       }
-      out.println(MARKER + "Effective Level: <b>" + HadoopLoggerUtils.getEffectiveLevel(log)
-          + "</b><br />");
+      out.println(MARKER
+          + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
     }
 
   }
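
A note on the validation idiom restored in process() above: log4j 1.x
Level.toLevel(String) never throws and falls back to DEBUG for names it does
not recognize, so the code detects a bad level by checking that the string
survives a round trip. A small illustration (the inputs are made up):

    import org.apache.log4j.Level;

    // a valid name round-trips to itself -> accepted
    boolean ok = Level.toLevel("WARN").toString().equalsIgnoreCase("WARN");    // true
    // an invalid name silently becomes DEBUG -> "Bad Level" branch is taken
    boolean bad = Level.toLevel("NOISY").toString().equalsIgnoreCase("NOISY"); // false
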
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 3c13fea..3debd36 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -40,8 +40,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.log4j.LogManager;
 
 import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 
@@ -761,7 +761,7 @@
         public void run() {
           log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
             "Shutting down " + classname + " at " + hostname}));
-          HadoopLoggerUtils.shutdownLogManager();
+          LogManager.shutdown();
         }
       }, SHUTDOWN_HOOK_PRIORITY);
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 913826f..b3487ef 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -68,7 +68,6 @@
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.alias.CredentialProvider;
 import org.apache.hadoop.security.alias.CredentialProviderFactory;
@@ -77,8 +76,10 @@
 
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.mockito.Mockito;
-import org.slf4j.LoggerFactory;
 
 public class TestConfiguration {
 
@@ -219,7 +220,9 @@
     InputStream in2 = new ByteArrayInputStream(bytes2);
 
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
 
     try {
       // Add the 2 different resources - this should generate a warning
@@ -227,13 +230,17 @@
       conf.addResource(in2);
       assertEquals("should see the first value", "A", conf.get("prop"));
 
-      String renderedMessage = logCapturer.getOutput();
-      assertTrue("did not see expected string inside message " + renderedMessage,
-          renderedMessage.contains(
-              "an attempt to override final parameter: " + "prop;  Ignoring."));
+      List<LoggingEvent> events = appender.getLog();
+      assertEquals("overriding a final parameter should cause logging", 1,
+          events.size());
+      LoggingEvent loggingEvent = events.get(0);
+      String renderedMessage = loggingEvent.getRenderedMessage();
+      assertTrue("did not see expected string inside message "+ renderedMessage,
+          renderedMessage.contains("an attempt to override final parameter: "
+              + "prop;  Ignoring."));
     } finally {
       // Make sure the appender is removed
-      logCapturer.stopCapturing();
+      logger.removeAppender(appender);
     }
   }
 
@@ -251,7 +258,9 @@
     InputStream in2 = new ByteArrayInputStream(bytes);
 
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
 
     try {
       // Add the resource twice from a stream - should not generate warnings
@@ -259,15 +268,20 @@
       conf.addResource(in2);
       assertEquals("A", conf.get("prop"));
 
-      String appenderOutput = logCapturer.getOutput();
+      List<LoggingEvent> events = appender.getLog();
+      for (LoggingEvent loggingEvent : events) {
+        System.out.println("Event = " + loggingEvent.getRenderedMessage());
+      }
       assertTrue("adding same resource twice should not cause logging",
-          appenderOutput.isEmpty());
+          events.isEmpty());
     } finally {
       // Make sure the appender is removed
-      logCapturer.stopCapturing();
+      logger.removeAppender(appender);
     }
   }
 
+
+
   @Test
   public void testFinalWarningsMultiple() throws Exception {
     // Make a configuration file with a repeated final property
@@ -281,19 +295,24 @@
     InputStream in1 = new ByteArrayInputStream(bytes);
 
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
 
     try {
       // Add the resource - this should not produce a warning
       conf.addResource(in1);
       assertEquals("should see the value", "A", conf.get("prop"));
 
-      String appenderOutput = logCapturer.getOutput();
+      List<LoggingEvent> events = appender.getLog();
+      for (LoggingEvent loggingEvent : events) {
+        System.out.println("Event = " + loggingEvent.getRenderedMessage());
+      }
       assertTrue("adding same resource twice should not cause logging",
-          appenderOutput.isEmpty());
+          events.isEmpty());
     } finally {
       // Make sure the appender is removed
-      logCapturer.stopCapturing();
+      logger.removeAppender(appender);
     }
   }
 
@@ -310,20 +329,48 @@
     InputStream in1 = new ByteArrayInputStream(bytes);
 
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
 
     try {
       // Add the resource - this should produce a warning
       conf.addResource(in1);
       assertEquals("should see the value", "A", conf.get("prop"));
 
-      String renderedMessage = logCapturer.getOutput();
-      assertTrue("did not see expected string inside message " + renderedMessage,
-          renderedMessage.contains(
-              "an attempt to override final parameter: " + "prop;  Ignoring."));
+      List<LoggingEvent> events = appender.getLog();
+      assertEquals("overriding a final parameter should cause logging", 1,
+          events.size());
+      LoggingEvent loggingEvent = events.get(0);
+      String renderedMessage = loggingEvent.getRenderedMessage();
+      assertTrue("did not see expected string inside message "+ renderedMessage,
+          renderedMessage.contains("an attempt to override final parameter: "
+              + "prop;  Ignoring."));
     } finally {
       // Make sure the appender is removed
-      logCapturer.stopCapturing();
+      logger.removeAppender(appender);
+    }
+  }
+
+  /**
+   * A simple appender for white box testing.
+   */
+  private static class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<>();
+
+    @Override public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<>(log);
     }
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
index 9e4405f..c016ff0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
@@ -36,9 +36,8 @@
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.log4j.Logger;
 import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -48,6 +47,9 @@
 
 public class CompressDecompressTester<T extends Compressor, E extends Decompressor> {
 
+  private static final Logger logger = Logger
+      .getLogger(CompressDecompressTester.class);
+
   private final byte[] originalRawData;
 
   private ImmutableList<TesterPair<T, E>> pairs = ImmutableList.of();
@@ -486,12 +488,12 @@
 
     return false;      
   }
-
+  
   abstract static class TesterCompressionStrategy {
 
-    protected final Logger logger = LoggerFactory.getLogger(getClass());
+    protected final Logger logger = Logger.getLogger(getClass());
 
-    abstract void assertCompression(String name, Compressor compressor, Decompressor decompressor,
-        byte[] originalRawData) throws Exception;
+    abstract void assertCompression(String name, Compressor compressor,
+        Decompressor decompressor, byte[] originalRawData) throws Exception;
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
index 99a1ff8..636c03a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
@@ -29,7 +29,6 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.log.LogLevel.CLI;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
@@ -41,11 +40,12 @@
 import org.junit.Assert;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.net.ssl.SSLException;
@@ -67,7 +67,7 @@
   private final String logName = TestLogLevel.class.getName();
   private String clientPrincipal;
   private String serverPrincipal;
-  private final Logger log = LoggerFactory.getLogger(logName);
+  private final Logger log = Logger.getLogger(logName);
   private final static String PRINCIPAL = "loglevel.principal";
   private final static String KEYTAB  = "loglevel.keytab";
   private static final String PREFIX = "hadoop.http.authentication.";
@@ -76,7 +76,7 @@
   public static void setUp() throws Exception {
     org.slf4j.Logger logger =
         LoggerFactory.getLogger(KerberosAuthenticator.class);
-    HadoopLoggerUtils.setLogLevel(logger.getName(), "DEBUG");
+    GenericTestUtils.setLogLevel(logger, Level.DEBUG);
     FileUtil.fullyDelete(BASEDIR);
     if (!BASEDIR.mkdirs()) {
       throw new Exception("unable to create the base directory for testing");
@@ -230,7 +230,7 @@
       final String connectProtocol, final boolean isSpnego)
       throws Exception {
     testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego,
-        "DEBUG");
+        Level.DEBUG.toString());
   }
 
   /**
@@ -250,8 +250,9 @@
     if (!LogLevel.isValidProtocol(connectProtocol)) {
       throw new Exception("Invalid client protocol " + connectProtocol);
     }
-    String oldLevel = HadoopLoggerUtils.getEffectiveLevel(log.getName());
-    Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.", "ERROR", oldLevel);
+    Level oldLevel = log.getEffectiveLevel();
+    Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.",
+        Level.ERROR, oldLevel);
 
     // configs needed for SPNEGO at server side
     if (isSpnego) {
@@ -287,7 +288,7 @@
     });
     server.stop();
     // restore log level
-    HadoopLoggerUtils.setLogLevel(log.getName(), oldLevel.toString());
+    GenericTestUtils.setLogLevel(log, oldLevel);
   }
 
   /**
@@ -321,7 +322,7 @@
     cli.run(setLevelArgs);
 
     assertEquals("new level not equal to expected: ", newLevel.toUpperCase(),
-        HadoopLoggerUtils.getEffectiveLevel(log.getName()));
+        log.getEffectiveLevel().toString());
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
index 8cfa14c..0dabe46 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
@@ -42,9 +42,8 @@
 import static org.apache.hadoop.metrics2.lib.Interns.info;
 import static org.junit.Assert.assertEquals;
 
+import org.apache.log4j.Logger;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.management.MBeanAttributeInfo;
 import javax.management.MBeanInfo;
@@ -242,7 +241,7 @@
     private MetricsSourceAdapter sa = null;
     private ScheduledFuture<?> future = null;
     private AtomicBoolean hasError = null;
-    private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class);
+    private static final Logger LOG = Logger.getLogger(SourceUpdater.class);
 
     public SourceUpdater(MetricsSourceAdapter sourceAdapter,
         AtomicBoolean err) {
@@ -264,7 +263,7 @@
       } catch (Exception e) {
         // catch all errors
         hasError.set(true);
-        LOG.error("Something went wrong.", e);
+        LOG.error(e.getStackTrace());
       } finally {
         if (hasError.get()) {
           LOG.error("Hit error, stopping now");
@@ -285,7 +284,7 @@
     private int cnt = 0;
     private ScheduledFuture<?> future = null;
     private AtomicBoolean hasError = null;
-    private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class);
+    private static final Logger LOG = Logger.getLogger(SourceReader.class);
 
     public SourceReader(
         TestMetricsSource source, MetricsSourceAdapter sourceAdapter,
@@ -319,7 +318,7 @@
       } catch (Exception e) {
         // catch other errors
         hasError.set(true);
-        LOG.error("Something went wrong.", e);
+        LOG.error(e.getStackTrace());
       } finally {
         if (hasError.get()) {
           future.cancel(false);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
index b139971..8c1339d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
@@ -22,7 +22,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
@@ -41,8 +41,8 @@
   private static final Logger TESTLOG =
       LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class);
 
-  private final LogCapturer shellMappingLog =
-      LogCapturer.captureLogs(
+  private final GenericTestUtils.LogCapturer shellMappingLog =
+      GenericTestUtils.LogCapturer.captureLogs(
           ShellBasedUnixGroupsMapping.LOG);
 
   private class TestGroupUserNotExist
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
index 6a6fff8..a0ce721 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
@@ -19,8 +19,6 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -44,7 +42,7 @@
     private static final String BASEDIR = GenericTestUtils.getTempPath(
             TestReloadingX509TrustManager.class.getSimpleName());
 
-    private final LogCapturer reloaderLog = LogCapturer.captureLogs(
+    private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs(
             FileMonitoringTimerTask.LOG);
 
     @BeforeClass
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index 8d2a4c7..6358959 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -19,7 +19,7 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
 import java.util.function.Supplier;
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
index 839c51c..b7b86b7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.service;
 
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
@@ -29,7 +29,7 @@
 
 import java.io.PrintWriter;
 
-import static org.apache.hadoop.logging.LogCapturer.captureLogs;
+import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.times;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 825fc70..e54971e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -25,6 +25,7 @@
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.PrintStream;
+import java.io.StringWriter;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
@@ -37,6 +38,7 @@
 import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
+import java.util.Enumeration;
 import java.util.TreeSet;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
@@ -51,11 +53,17 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
 import org.apache.hadoop.util.DurationInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.WriterAppender;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.mockito.invocation.InvocationOnMock;
@@ -107,17 +115,51 @@
   public static final String ERROR_INVALID_ARGUMENT =
       "Total wait time should be greater than check interval time";
 
+  @Deprecated
+  public static Logger toLog4j(org.slf4j.Logger logger) {
+    return LogManager.getLogger(logger.getName());
+  }
+
+  /**
+   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+   */
+  @Deprecated
+  public static void disableLog(Logger logger) {
+    logger.setLevel(Level.OFF);
+  }
+
   public static void disableLog(org.slf4j.Logger logger) {
-    HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF");
+    disableLog(toLog4j(logger));
+  }
+
+  public static void setLogLevel(Logger logger, Level level) {
+    logger.setLevel(level);
+  }
+
+  /**
+   * @deprecated
+   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+   */
+  @Deprecated
+  public static void setLogLevel(org.slf4j.Logger logger, Level level) {
+    setLogLevel(toLog4j(logger), level);
   }
 
   public static void setLogLevel(org.slf4j.Logger logger,
                                  org.slf4j.event.Level level) {
-    HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString());
+    setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
   }
 
   public static void setRootLogLevel(org.slf4j.event.Level level) {
-    HadoopLoggerUtils.setLogLevel("root", level.toString());
+    setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
+  }
+
+  public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) {
+    for (Enumeration<?> loggers = LogManager.getCurrentLoggers();
+        loggers.hasMoreElements();) {
+      Logger logger = (Logger) loggers.nextElement();
+      logger.setLevel(Level.toLevel(level.toString()));
+    }
   }
 
   public static org.slf4j.event.Level toLevel(String level) {
@@ -429,6 +471,47 @@
     }
   }
 
+  public static class LogCapturer {
+    private StringWriter sw = new StringWriter();
+    private WriterAppender appender;
+    private Logger logger;
+
+    public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+      if (logger.getName().equals("root")) {
+        return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
+      }
+      return new LogCapturer(toLog4j(logger));
+    }
+
+    public static LogCapturer captureLogs(Logger logger) {
+      return new LogCapturer(logger);
+    }
+
+    private LogCapturer(Logger logger) {
+      this.logger = logger;
+      Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
+      if (defaultAppender == null) {
+        defaultAppender = Logger.getRootLogger().getAppender("console");
+      }
+      final Layout layout = (defaultAppender == null) ? new PatternLayout() :
+          defaultAppender.getLayout();
+      this.appender = new WriterAppender(layout, sw);
+      logger.addAppender(this.appender);
+    }
+
+    public String getOutput() {
+      return sw.toString();
+    }
+
+    public void stopCapturing() {
+      logger.removeAppender(appender);
+    }
+
+    public void clearOutput() {
+      sw.getBuffer().setLength(0);
+    }
+  }
+
   /**
    * Mockito answer helper that triggers one latch as soon as the
    * method is called, then waits on another before continuing.
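
The LogCapturer restored above (like GenericTestUtils.setLogLevel) bridges
slf4j loggers to log4j 1.x by name via toLog4j(), which assumes slf4j is bound
through slf4j-log4j12 as the POM changes in this revert arrange. A typical
capture pattern, mirroring the tests elsewhere in this diff (the logger,
service, and message are illustrative):

    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
        .captureLogs(LoggerFactory.getLogger(MyService.class));
    try {
      service.doWork();                                  // code under test
      assertTrue(logs.getOutput().contains("finished")); // inspect captured text
    } finally {
      logs.stopCapturing();  // always detach the WriterAppender afterwards
    }
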
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index f6f4a44..8489e3d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -26,8 +26,6 @@
 import java.util.function.Supplier;
 import org.slf4j.event.Level;
 
-import org.apache.hadoop.logging.LogCapturer;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
index 8375864..98e1822 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
@@ -22,8 +22,8 @@
 
 import org.junit.Assert;
 
+import org.apache.log4j.Logger;
 import org.junit.Test;
-import org.slf4j.Logger;
 
 public class TestClassUtil {
   @Test(timeout=10000)
@@ -35,6 +35,6 @@
     Assert.assertTrue("Containing jar does not exist on file system ",
         jarFile.exists());
     Assert.assertTrue("Incorrect jar file " + containingJar,
-        jarFile.getName().matches("slf4j-api.*[.]jar"));
+        jarFile.getName().matches("log4j.*[.]jar"));
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
index ec26af66..1d1ce89 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
@@ -28,7 +28,7 @@
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.assertj.core.api.Assertions;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
index fb6221f..f43930d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
@@ -28,12 +28,10 @@
 import java.util.Random;
 
 import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.util.hash.Hash;
+import org.apache.log4j.Logger;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
@@ -115,7 +113,7 @@
   }
 
   interface FilterTesterStrategy {
-    Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class);
+    final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
 
     void assertWhat(Filter filter, int numInsertions, int hashId,
         ImmutableSet<Integer> falsePositives);
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index 8a04c4e..96588a2 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -54,12 +54,6 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
       <groupId>org.apache.hadoop.thirdparty</groupId>
       <artifactId>hadoop-shaded-guava</artifactId>
       <scope>compile</scope>
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 97d85428..f4c7fbe 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -49,7 +49,6 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
@@ -584,8 +583,8 @@
   @Test
   public void testStartStopHttpPseudo() throws Exception {
     // Make sure bogus errors don't get emitted.
-    LogCapturer logs =
-        LogCapturer.captureLogs(LoggerFactory.getLogger(
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
             "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator"));
     try {
       testStartStop(false, false);
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
index 6e12d94..3d0fd7d 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
@@ -18,24 +18,23 @@
 package org.apache.hadoop.crypto.key.kms.server;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.FilterOutputStream;
+import java.io.InputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.nio.file.Paths;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang3.reflect.FieldUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-
+import org.apache.hadoop.util.ThreadUtil;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PropertyConfigurator;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -68,23 +67,24 @@
   public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS);
 
   @Before
-  public void setUp() throws IOException, URISyntaxException {
+  public void setUp() throws IOException {
     originalOut = System.err;
     memOut = new ByteArrayOutputStream();
     filterOut = new FilterOut(memOut);
     capturedOut = new PrintStream(filterOut);
     System.setErr(capturedOut);
-    URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties");
-    File file = Paths.get(url.toURI()).toFile();
-    HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath());
+    InputStream is =
+        ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties");
+    PropertyConfigurator.configure(is);
+    IOUtils.closeStream(is);
     Configuration conf = new Configuration();
     this.kmsAudit = new KMSAudit(conf);
   }
 
   @After
-  public void cleanUp() throws Exception {
+  public void cleanUp() {
     System.setErr(originalOut);
-    HadoopLoggerUtils.resetConfiguration();
+    LogManager.resetConfiguration();
     kmsAudit.shutdown();
   }
 
diff --git a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 304d1e4..0000000
--- a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<FindBugsFilter>
-  <!--
-   conversionPattern is only set once and used to initiate PatternLayout object
-   only once. It is set by log4j framework if set as part of log4j properties and accessed
-   only during first append operation.
-  -->
-  <Match>
-    <Class name="org.apache.hadoop.logging.appenders.AsyncRFAAppender"/>
-    <Field name="conversionPattern"/>
-    <Bug pattern="IS2_INCONSISTENT_SYNC"/>
-  </Match>
-
-  <!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
-  <Match>
-    <Class name="org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender$Element"/>
-    <Or>
-      <Field name="count"/>
-      <Field name="timestampSeconds"/>
-    </Or>
-    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD"/>
-  </Match>
-
-</FindBugsFilter>
diff --git a/hadoop-common-project/hadoop-logging/pom.xml b/hadoop-common-project/hadoop-logging/pom.xml
deleted file mode 100644
index 20af2be..0000000
--- a/hadoop-common-project/hadoop-logging/pom.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <parent>
-    <artifactId>hadoop-project</artifactId>
-    <groupId>org.apache.hadoop</groupId>
-    <version>3.4.0-SNAPSHOT</version>
-    <relativePath>../../hadoop-project</relativePath>
-  </parent>
-  <modelVersion>4.0.0</modelVersion>
-
-  <artifactId>hadoop-logging</artifactId>
-  <version>3.4.0-SNAPSHOT</version>
-  <packaging>jar</packaging>
-
-  <name>Apache Hadoop Logging</name>
-  <description>Logging Support for Apache Hadoop project</description>
-
-  <properties>
-    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-source-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>jar</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <attach>true</attach>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>prepare-jar</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>jar</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>prepare-test-jar</id>
-            <phase>prepare-package</phase>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-</project>
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java
deleted file mode 100644
index b0bd2e3..0000000
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.logging;
-
-import java.io.FileInputStream;
-import java.io.Flushable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.Enumeration;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.log4j.Appender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PropertyConfigurator;
-
-/**
- * Hadoop's internal class that access log4j APIs directly.
- * <p/>
- * This class will depend on log4j directly, so callers should not use this class directly to avoid
- * introducing log4j dependencies to downstream users. Please call the methods in
- * {@link HadoopLoggerUtils}, as they will call the methods here through reflection.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-final class HadoopInternalLog4jUtils {
-
-  private HadoopInternalLog4jUtils() {
-  }
-
-  static void setLogLevel(String loggerName, String levelName) {
-    if (loggerName == null) {
-      throw new IllegalArgumentException("logger name cannot be null");
-    }
-    Logger logger = loggerName.equalsIgnoreCase("root") ?
-        LogManager.getRootLogger() :
-        LogManager.getLogger(loggerName);
-    Level level = Level.toLevel(levelName.toUpperCase());
-    if (!level.toString().equalsIgnoreCase(levelName)) {
-      throw new IllegalArgumentException("Unsupported log level " + levelName);
-    }
-    logger.setLevel(level);
-  }
-
-  static void shutdownLogManager() {
-    LogManager.shutdown();
-  }
-
-  static String getEffectiveLevel(String loggerName) {
-    Logger logger = loggerName.equalsIgnoreCase("root") ?
-        LogManager.getRootLogger() :
-        LogManager.getLogger(loggerName);
-    return logger.getEffectiveLevel().toString();
-  }
-
-  static void resetConfiguration() {
-    LogManager.resetConfiguration();
-  }
-
-  static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) throws Exception {
-    Properties customProperties = new Properties();
-    try (FileInputStream fs = new FileInputStream(log4jPath);
-        InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
-      customProperties.load(fs);
-      Properties originalProperties = new Properties();
-      originalProperties.load(is);
-      for (Map.Entry<Object, Object> entry : customProperties.entrySet()) {
-        originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString());
-      }
-      LogManager.resetConfiguration();
-      PropertyConfigurator.configure(originalProperties);
-    }
-  }
-
-  static boolean hasAppenders(String logger) {
-    return Logger.getLogger(logger)
-        .getAllAppenders()
-        .hasMoreElements();
-  }
-
-  @SuppressWarnings("unchecked")
-  static void syncLogs() {
-    // flush standard streams
-    //
-    System.out.flush();
-    System.err.flush();
-
-    // flush flushable appenders
-    //
-    final Logger rootLogger = Logger.getRootLogger();
-    flushAppenders(rootLogger);
-    final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
-        getCurrentLoggers();
-    while (allLoggers.hasMoreElements()) {
-      final Logger l = allLoggers.nextElement();
-      flushAppenders(l);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private static void flushAppenders(Logger l) {
-    final Enumeration<Appender> allAppenders = l.getAllAppenders();
-    while (allAppenders.hasMoreElements()) {
-      final Appender a = allAppenders.nextElement();
-      if (a instanceof Flushable) {
-        try {
-          ((Flushable) a).flush();
-        } catch (IOException ioe) {
-          System.err.println(a + ": Failed to flush!"
-              + stringifyException(ioe));
-        }
-      }
-    }
-  }
-
-  private static String stringifyException(Throwable e) {
-    StringWriter stringWriter = new StringWriter();
-    PrintWriter printWriter = new PrintWriter(stringWriter);
-    e.printStackTrace(printWriter);
-    printWriter.close();
-    return stringWriter.toString();
-  }
-
-}
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java
deleted file mode 100644
index 1d0bea1..0000000
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.logging;
-
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * A bridge class for operating on logging framework, such as changing log4j log level, etc.
- * Will call the methods in {@link HadoopInternalLog4jUtils} to perform operations on log4j level.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public final class HadoopLoggerUtils {
-
-  private static final String INTERNAL_UTILS_CLASS =
-      "org.apache.hadoop.logging.HadoopInternalLog4jUtils";
-
-  private HadoopLoggerUtils() {
-  }
-
-  private static Method getMethod(String methodName, Class<?>... args) {
-    try {
-      Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS);
-      return clazz.getDeclaredMethod(methodName, args);
-    } catch (ClassNotFoundException | NoSuchMethodException e) {
-      throw new AssertionError("should not happen", e);
-    }
-  }
-
-  private static void throwUnchecked(Throwable throwable) {
-    if (throwable instanceof RuntimeException) {
-      throw (RuntimeException) throwable;
-    }
-    if (throwable instanceof Error) {
-      throw (Error) throwable;
-    }
-  }
-
-  public static void shutdownLogManager() {
-    Method method = getMethod("shutdownLogManager");
-    try {
-      method.invoke(null);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-  public static void setLogLevel(String loggerName, String levelName) {
-    Method method = getMethod("setLogLevel", String.class, String.class);
-    try {
-      method.invoke(null, loggerName, levelName);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-  public static String getEffectiveLevel(String loggerName) {
-    Method method = getMethod("getEffectiveLevel", String.class);
-    try {
-      return (String) method.invoke(null, loggerName);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-  public static void resetConfiguration() {
-    Method method = getMethod("resetConfiguration");
-    try {
-      method.invoke(null);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-  public static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) {
-    Method method = getMethod("updateLog4jConfiguration", Class.class, String.class);
-    try {
-      method.invoke(null, targetClass, log4jPath);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-  public static boolean hasAppenders(String logger) {
-    Method method = getMethod("hasAppenders", String.class);
-    try {
-      return (Boolean) method.invoke(null, logger);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-  public synchronized static void syncLogs() {
-    Method method = getMethod("syncLogs");
-    try {
-      method.invoke(null);
-    } catch (IllegalAccessException e) {
-      throw new AssertionError("should not happen", e);
-    } catch (InvocationTargetException e) {
-      throwUnchecked(e.getCause());
-      throw new AssertionError("Failed to execute, should not happen", e.getCause());
-    }
-  }
-
-}
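For reference: the facade removed above dispatched reflectively to HadoopInternalLog4jUtils so that callers never touched log4j types directly. With it gone, the call sites in this revert go back to the log4j 1.x API; a minimal sketch of the equivalent operations, assuming log4j 1.x on the classpath (the class name here is illustrative):

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;

public final class Log4jLevelSketch {
  private Log4jLevelSketch() {
  }

  // Equivalent of the removed HadoopLoggerUtils.setLogLevel(loggerName, levelName).
  // Level.toLevel(String) maps the name to a Level and defaults to DEBUG for
  // unrecognized names, so a bad level string degrades softly rather than throwing.
  public static void setLogLevel(String loggerName, String levelName) {
    LogManager.getLogger(loggerName).setLevel(Level.toLevel(levelName));
  }

  // Equivalent of the removed HadoopLoggerUtils.getEffectiveLevel(loggerName).
  public static String getEffectiveLevel(String loggerName) {
    return LogManager.getLogger(loggerName).getEffectiveLevel().toString();
  }
}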
diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java
deleted file mode 100644
index 45f5d0c..0000000
--- a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.logging;
-
-import java.io.StringWriter;
-
-import org.apache.log4j.Appender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.WriterAppender;
-
-public class LogCapturer {
-  private final StringWriter sw = new StringWriter();
-  private final Appender appender;
-  private final Logger logger;
-
-  public static LogCapturer captureLogs(org.slf4j.Logger logger) {
-    if (logger.getName().equals("root")) {
-      return new LogCapturer(Logger.getRootLogger());
-    }
-    return new LogCapturer(LogManager.getLogger(logger.getName()));
-  }
-
-  private LogCapturer(Logger logger) {
-    this.logger = logger;
-    Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
-    if (defaultAppender == null) {
-      defaultAppender = Logger.getRootLogger().getAppender("console");
-    }
-    final Layout layout =
-        (defaultAppender == null) ? new PatternLayout() : defaultAppender.getLayout();
-    this.appender = new WriterAppender(layout, sw);
-    logger.addAppender(this.appender);
-  }
-
-  public String getOutput() {
-    return sw.toString();
-  }
-
-  public void stopCapturing() {
-    logger.removeAppender(appender);
-  }
-
-  public void clearOutput() {
-    sw.getBuffer().setLength(0);
-  }
-}
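With this standalone LogCapturer deleted, the test hunks throughout the rest of this revert fall back to GenericTestUtils.LogCapturer, which exposes the same captureLogs/getOutput/stopCapturing surface. A sketch of the capture cycle as those tests use it (the logger name and expected output are illustrative):

import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.LoggerFactory;

public class LogCaptureSketch {
  public void captureExample() {
    // Attach a capturing appender to the target logger.
    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LoggerFactory.getLogger("org.example.Component"));
    try {
      // ... exercise the code under test ...
      // getOutput() returns everything appended since capture started.
      String output = logs.getOutput();
    } finally {
      // Detach the capturing appender again.
      logs.stopCapturing();
    }
  }
}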
diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java
deleted file mode 100644
index 4bafb5a..0000000
--- a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.logging.test;
-
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.logging.HadoopLoggerUtils;
-
-public class TestSyncLogs {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestSyncLogs.class);
-
-  @Test
-  public void testSyncLogs() {
-    LOG.info("Testing log sync");
-    HadoopLoggerUtils.syncLogs();
-  }
-
-}
diff --git a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties
deleted file mode 100644
index ff1468c..0000000
--- a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=debug,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml
index d2e9933..c292aeb 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -39,6 +39,11 @@
       <artifactId>kerb-simplekdc</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>compile</scope>
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index b0fb888..f167a07 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -38,7 +38,6 @@
     <module>hadoop-minikdc</module>
     <module>hadoop-kms</module>
     <module>hadoop-registry</module>
-    <module>hadoop-logging</module>
   </modules>
 
   <build>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 9a1226e..b362e00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -87,12 +87,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
       <groupId>org.mock-server</groupId>
       <artifactId>mockserver-netty</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
index d0b8653..1fe6dca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Lists;
 import org.junit.Assert;
 import org.junit.Test;
@@ -62,8 +61,8 @@
   public void testSSLInitFailure() throws Exception {
     Configuration conf = new Configuration();
     conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
-    LogCapturer logs =
-        LogCapturer.captureLogs(
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(
             LoggerFactory.getLogger(URLConnectionFactory.class));
     URLConnectionFactory.newDefaultURLConnectionFactory(conf);
     Assert.assertTrue("Expected log for ssl init failure not found!",
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index b9aae62..a5bf5c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -182,12 +182,6 @@
       <artifactId>junit-jupiter-params</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
index 9f74337..0741f1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX;
 import static org.junit.Assert.assertEquals;
@@ -49,8 +48,8 @@
 
   private static final Logger LOG =
       LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class);
-  private final LogCapturer controllerLog =
-      LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
+  private final GenericTestUtils.LogCapturer controllerLog =
+      GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
 
   private StateStoreDFSCluster cluster;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
index d4f6827..1f5770b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
@@ -179,7 +179,7 @@
 
   private void verifyInstantiationError(Configuration conf, int handlerCount,
       int totalDedicatedHandlers) {
-    LogCapturer logs = LogCapturer
+    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
         .captureLogs(LoggerFactory.getLogger(
             StaticRouterRpcFairnessPolicyController.class));
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
index bb81eaa..9ee9692 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -54,7 +55,6 @@
 import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -322,7 +322,11 @@
       int httpsRequests, int requestsPerService) {
 
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender =
+        new LogVerificationAppender();
+    final org.apache.log4j.Logger logger =
+        org.apache.log4j.Logger.getRootLogger();
+    logger.addAppender(appender);
     GenericTestUtils.setRootLogLevel(Level.DEBUG);
 
     // Setup and start the Router
@@ -343,11 +347,8 @@
         heartbeatService.getNamenodeStatusReport();
       }
     }
-    assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
-        "JMX URL: https://"));
-    assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
-        "JMX URL: http://"));
-    logCapturer.stopCapturing();
+    assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://"));
+    assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://"));
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index 3db20a6..d3d3421 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -135,8 +135,6 @@
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
@@ -2069,8 +2067,8 @@
 
   @Test
   public void testMkdirsWithCallerContext() throws IOException {
-    LogCapturer auditlog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditlog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2096,8 +2094,8 @@
   @Test
   public void testRealUserPropagationInCallerContext()
       throws IOException, InterruptedException {
-    LogCapturer auditlog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditlog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2141,8 +2139,8 @@
 
   @Test
   public void testAddClientIpPortToCallerContext() throws IOException {
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientIp and ClientPort are not set on the client.
     // Set client context.
@@ -2176,8 +2174,8 @@
 
   @Test
   public void testAddClientIdAndCallIdToCallerContext() throws IOException {
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientId and ClientCallId are not set on the client.
     // Set client context.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index caecb69..336ea39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -72,8 +72,6 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.Test;
 import org.slf4j.event.Level;
 
@@ -278,10 +276,12 @@
   @Test
   public void testPreviousBlockNotNull()
       throws IOException, URISyntaxException {
-    final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog);
+    final GenericTestUtils.LogCapturer stateChangeLog =
+        GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog);
     GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG);
 
-    final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG);
+    final GenericTestUtils.LogCapturer nameNodeLog =
+        GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG);
     GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG);
 
     final FederationRPCMetrics metrics = getRouterContext().
@@ -454,8 +454,8 @@
 
   @Test
   public void testCallerContextWithMultiDestinations() throws IOException {
-    LogCapturer auditLog =
-        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    GenericTestUtils.LogCapturer auditLog =
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // set client context
     CallerContext.setCurrent(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 5c2df9a..8632c56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -310,4 +310,14 @@
       <Method name="reconcile" />
       <Bug pattern="SWL_SLEEP_WITH_LOCK_HELD" />
     </Match>
+    <!--
+     conversionPattern is set only once and is used to initialize the PatternLayout
+     object only once. It is set by the log4j framework if configured in the log4j
+     properties, and it is accessed only during the first append operation.
+    -->
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.util.AsyncRFAAppender"/>
+      <Field name="conversionPattern"/>
+      <Bug pattern="IS2_INCONSISTENT_SYNC"/>
+    </Match>
 </FindBugsFilter>
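For context on the new exclusion: IS2_INCONSISTENT_SYNC fires when a field is accessed both with and without a lock held. A simplified Java shape of the benign case being excluded, following the reasoning in the comment above (this sketch is illustrative, not the actual AsyncRFAAppender code):

public class ConfiguredAppenderSketch {
  private String conversionPattern; // written once by the log4j configurator

  // Called by the framework during configuration, before any append runs.
  public void setConversionPattern(String pattern) {
    this.conversionPattern = pattern; // unsynchronized write
  }

  // Synchronized read on the first append; FindBugs sees the mix of locked
  // and unlocked access and reports IS2_INCONSISTENT_SYNC, even though
  // configuration is guaranteed to complete before appends begin.
  public synchronized void append() {
    String pattern = conversionPattern;
    // ... build the PatternLayout from pattern ...
  }
}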
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index a8922cb..5f15649 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -165,12 +165,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
index a361a28..21c01ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
@@ -31,8 +31,6 @@
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 /**
@@ -113,8 +111,11 @@
         .substring(0, maxLogLineLength) + "...");
   }
 
+  // TODO: once a hadoop-logging module hides the log4j implementation details,
+  //  this method can call a utility from hadoop-logging directly.
   private static boolean hasAppenders(Logger logger) {
-    return HadoopLoggerUtils.hasAppenders(logger.getName());
+    return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
+        .hasMoreElements();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
index 4e8daf3..ab30110 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
@@ -32,11 +32,11 @@
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -110,13 +110,13 @@
   }
 
   static void initLogLevels() {
-    Util.setLogLevel(FSImage.class, "TRACE");
-    Util.setLogLevel(FileJournalManager.class, "TRACE");
+    Util.setLogLevel(FSImage.class, Level.TRACE);
+    Util.setLogLevel(FileJournalManager.class, Level.TRACE);
 
-    Util.setLogLevel(GSet.class, "OFF");
-    Util.setLogLevel(BlockManager.class, "OFF");
-    Util.setLogLevel(DatanodeManager.class, "OFF");
-    Util.setLogLevel(TopMetrics.class, "OFF");
+    Util.setLogLevel(GSet.class, Level.OFF);
+    Util.setLogLevel(BlockManager.class, Level.OFF);
+    Util.setLogLevel(DatanodeManager.class, Level.OFF);
+    Util.setLogLevel(TopMetrics.class, Level.OFF);
   }
 
   static class Util {
@@ -127,10 +127,11 @@
           + ", max=" + StringUtils.byteDesc(runtime.maxMemory());
     }
 
-    static void setLogLevel(Class<?> clazz, String level) {
-      HadoopLoggerUtils.setLogLevel(clazz.getName(), level);
+    static void setLogLevel(Class<?> clazz, Level level) {
+      final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
+      logger.setLevel(level);
       LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
-          HadoopLoggerUtils.getEffectiveLevel(clazz.getName()));
+          logger.getEffectiveLevel());
     }
 
     static String toCommaSeparatedNumber(long n) {
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
similarity index 98%
rename from hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
index 2abfffb..276e5b0 100644
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.logging.appenders;
+package org.apache.hadoop.hdfs.util;
 
 import java.io.IOException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
new file mode 100644
index 0000000..10ef47b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.log4j.spi.ThrowableInformation;
+
+/**
+ * Used to verify that certain exceptions or messages are present in log output.
+ */
+public class LogVerificationAppender extends AppenderSkeleton {
+  private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+
+  @Override
+  protected void append(final LoggingEvent loggingEvent) {
+    log.add(loggingEvent);
+  }
+
+  @Override
+  public void close() {
+  }
+
+  public List<LoggingEvent> getLog() {
+    return new ArrayList<LoggingEvent>(log);
+  }
+
+  public int countExceptionsWithMessage(final String text) {
+    int count = 0;
+    for (LoggingEvent e: getLog()) {
+      ThrowableInformation t = e.getThrowableInformation();
+      if (t != null) {
+        String m = t.getThrowable().getMessage();
+        if (m != null && m.contains(text)) {
+          count++;
+        }
+      }
+    }
+    return count;
+  }
+
+  public int countLinesWithMessage(final String text) {
+    int count = 0;
+    for (LoggingEvent e: getLog()) {
+      String msg = e.getRenderedMessage();
+      if (msg != null && msg.contains(text)) {
+        count++;
+      }
+    }
+    return count;
+  }
+}
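The restored appender is used by the hunks below in an attach-run-count pattern against the root logger. A condensed sketch of that pattern (the message text and the removeAppender cleanup are illustrative; several of the reverted tests leave the appender attached):

import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.log4j.Logger;

public class LogVerificationSketch {
  public int verifyExample() {
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger root = Logger.getRootLogger();
    root.addAppender(appender);
    try {
      // ... exercise code that logs through log4j ...
      // Counting happens after the fact; append() only buffers events.
      return appender.countLinesWithMessage("JMX URL: http://");
    } finally {
      root.removeAppender(appender);
    }
  }
}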
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index 75ad5bd..b16f023 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -33,8 +33,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.logging.LogCapturer;
-
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestDFSRename {
@@ -190,8 +189,8 @@
       final DistributedFileSystem dfs = cluster.getFileSystem();
       Path path = new Path("/test");
       dfs.mkdirs(path);
-      LogCapturer auditLog =
-          LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+      GenericTestUtils.LogCapturer auditLog =
+          GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       dfs.rename(path, new Path("/dir1"),
           new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
       String auditOut = auditLog.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 80424a3..5469ebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -45,9 +45,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Logger;
 import org.junit.Test;
 
 import static org.junit.Assert.*;
@@ -317,7 +317,9 @@
         "imageMD5Digest", "22222222222222222222222222222222");
     
     // Attach our own log appender so we can verify output
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
 
     // Upgrade should now fail
     try {
@@ -329,10 +331,9 @@
       if (!msg.contains("Failed to load FSImage file")) {
         throw ioe;
       }
-      int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+      int md5failures = appender.countExceptionsWithMessage(
           " is corrupt with MD5 checksum of ");
       assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
-      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index c792386..c57ef94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -26,7 +26,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -48,7 +48,7 @@
 
   @Test(timeout = 60000)
   public void testDfsClient() throws IOException, InterruptedException {
-    LogCapturer logs = LogCapturer.captureLogs(LoggerFactory
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory
         .getLogger(DataStreamer.class));
     byte[] toWrite = new byte[PACKET_SIZE];
     new Random(1).nextBytes(toWrite);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 4299c11..f9336fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -51,7 +51,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.junit.After;
 import org.junit.Before;
@@ -168,9 +168,9 @@
 
     FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(SaslDataTransferServer.class));
-    LogCapturer logs1 = LogCapturer.captureLogs(
+    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -239,7 +239,7 @@
     Mockito.doReturn(false).when(spyClient).shouldEncryptData();
     DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataNode.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -457,9 +457,9 @@
 
     fs = getFileSystem(conf);
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(SaslDataTransferServer.class));
-    LogCapturer logs1 = LogCapturer.captureLogs(
+    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       writeTestDataToFile(fs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index c656128..3dd0b7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -54,7 +54,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
@@ -138,7 +138,7 @@
     HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
     clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
 
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataNode.class));
     try {
       doTest(clientConf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
index 84b7c8f..82b8b58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 5d2a927..d69051c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -56,7 +56,7 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index 7e926a9..d32cde8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -28,7 +28,6 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.Whitebox;
 
 import org.assertj.core.api.Assertions;
@@ -236,8 +235,8 @@
   public void testCheckSafeMode9() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000);
-    LogCapturer logs =
-        LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
     BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
         fsn, true, conf);
     String content = logs.getOutput();
@@ -248,8 +247,8 @@
   public void testCheckSafeMode10(){
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1);
-    LogCapturer logs =
-            LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+    GenericTestUtils.LogCapturer logs =
+            GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
     BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
             fsn, true, conf);
     String content = logs.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 87c8383..ea7347f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -58,7 +58,7 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.slf4j.LoggerFactory;
@@ -575,7 +575,7 @@
         new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
-    LogCapturer logs = LogCapturer
+    LogCapturer logs = GenericTestUtils.LogCapturer
         .captureLogs(LoggerFactory.getLogger("BlockStateChange"));
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index c4b5f7a..20163cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -21,6 +21,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -40,7 +41,6 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.ContentSummary;
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -66,15 +67,16 @@
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class TestReplicationPolicy extends BaseReplicationPolicyTest {
@@ -505,26 +507,26 @@
           2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
           (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
-
-    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
-
+
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
     // try to choose NUM_OF_DATANODES which is more than actually available
     // nodes.
     DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
     assertEquals(targets.length, dataNodes.length - 2);
 
-    boolean isFound = false;
-    for (String logLine : logCapturer.getOutput().split("\n")) {
-      // Suppose to place replicas on each node but two data nodes are not
-      // available for placing replica, so here we expect a short of 2
-      if(logLine.contains("WARN") && logLine.contains("in need of 2")) {
-        isFound = true;
-        break;
-      }
-    }
-    assertTrue("Could not find the block placement log specific to 2 datanodes not being "
-            + "available for placing replicas", isFound);
-    logCapturer.stopCapturing();
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+
+    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
+    // Supposed to place replicas on each node, but two data nodes are not
+    // available for placing replicas, so here we expect a shortfall of 2
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
     resetHeartbeatForStorages();
   }
 
@@ -1708,14 +1710,17 @@
 
   @Test
   public void testChosenFailureForStorageType() {
-    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
     DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
         dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
         BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
             HdfsConstants.StoragePolicy.COLD.value()), null);
     assertEquals(0, targets.length);
     assertNotEquals(0,
-        StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE"));
+        appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 13efcf7..73201ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -27,6 +27,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Collections;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -38,15 +39,19 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
+import java.util.function.Supplier;
+
 /**
  * Test periodic logging of DataNode metrics.
  */
@@ -123,13 +128,13 @@
   }
 
   @Test
-  @SuppressWarnings("unchecked")
   public void testMetricsLoggerIsAsync() throws IOException {
     startDNForTest(true);
     assertNotNull(dn);
-    assertTrue(Collections.list(
-            org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders())
-        .get(0) instanceof org.apache.log4j.AsyncAppender);
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    assertTrue(appenders.get(0) instanceof AsyncAppender);
   }
 
   /**
@@ -144,15 +149,27 @@
         metricsProvider);
     startDNForTest(true);
     assertNotNull(dn);
-    LogCapturer logCapturer =
-        LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME));
+    final PatternMatchingAppender appender =
+        (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME)
+            .getAppender("PATTERNMATCHERAPPENDER");
+
     // Ensure that the supplied pattern was matched.
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
-        1000, 60000);
-    logCapturer.stopCapturing();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return appender.isMatched();
+      }
+    }, 1000, 60000);
+
     dn.shutdown();
   }
 
+  private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    ((AsyncAppender) appenders.get(0)).addAppender(appender);
+  }
+
   public interface TestFakeMetricMXBean {
     int getFakeMetric();
   }
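The testMetricsLoggerIsAsync rewrite above keeps the original assertion: the metrics logger's first appender must be an AsyncAppender, so metrics appends never block the DataNode. A sketch of the unwrap-and-attach step that the restored addAppender helper performs (the logger name is illustrative):

import java.util.Collections;
import java.util.List;

import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

public class AsyncAttachSketch {
  // Attaches a child appender to the AsyncAppender wrapping a metrics logger,
  // so the child sees every event the async dispatcher drains.
  public void attach(Appender child) {
    Logger logger = Logger.getLogger("hypothetical.metrics.logger");
    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    ((AsyncAppender) appenders.get(0)).addAppender(child);
  }
}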
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 82d7a81..74c70ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -27,6 +27,7 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -76,9 +77,10 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -412,9 +414,14 @@
   @Test(timeout=600000)
   public void testScanDirectoryStructureWarn() throws Exception {
 
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     //add a logger stream to check what has printed to log
+    ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
+    org.apache.log4j.Logger rootLogger =
+        org.apache.log4j.Logger.getRootLogger();
     GenericTestUtils.setRootLogLevel(Level.INFO);
+    WriterAppender writerAppender =
+        new WriterAppender(new SimpleLayout(), loggerStream);
+    rootLogger.addAppender(writerAppender);
 
     Configuration conf = getConfiguration();
     cluster = new MiniDFSCluster
@@ -445,7 +452,7 @@
       scan(1, 1, 0, 1, 0, 0, 0);
 
       //ensure the warn log not appear and missing block log do appear
-      String logContent = logCapturer.getOutput();
+      String logContent = new String(loggerStream.toByteArray());
       String missingBlockWarn = "Deleted a metadata file" +
           " for the deleted block";
       String dirStructureWarnLog = " found in invalid directory." +
@@ -457,7 +464,6 @@
       LOG.info("check pass");
 
     } finally {
-      logCapturer.stopCapturing();
       if (scanner != null) {
         scanner.shutdown();
         scanner = null;
@@ -520,7 +526,7 @@
       client = cluster.getFileSystem().getClient();
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
       // log trace
-      LogCapturer logCapturer = LogCapturer.
+      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.
           captureLogs(NameNode.stateChangeLog);
       // Add files with 5 blocks
       createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
index c7fc71f..8b1a6c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import net.jcip.annotations.NotThreadSafe;
-
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
@@ -53,6 +51,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -80,10 +79,10 @@
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -394,7 +393,9 @@
     }
 
     // nth file should hit a capacity exception
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
     setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));
 
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -402,12 +403,11 @@
       public Boolean get() {
         // check the log reported by FsDataSetCache
         // in the case that cache capacity is exceeded.
-        int lines = StringUtils.countMatches(logCapturer.getOutput(),
+        int lines = appender.countLinesWithMessage(
             "could not reserve more bytes in the cache: ");
         return lines > 0;
       }
     }, 500, 30000);
-    logCapturer.stopCapturing();
     // Also check the metrics for the failure
     assertTrue("Expected more than 0 failed cache attempts",
         fsd.getNumBlocksFailedToCache() > 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 8f3ef44..073bb53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer;
 
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Preconditions;
 import java.util.function.Supplier;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -322,7 +321,7 @@
         0);
     DFSTestUtil.waitReplication(fs, filePath, (short) 1);
 
-    LogCapturer logCapturer = LogCapturer
+    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(DiskBalancer.LOG);
 
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
new file mode 100644
index 0000000..f099dfa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.regex.Pattern;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * An appender that matches logged messages against the given
+ * regular expression.
+ */
+public class PatternMatchingAppender extends AppenderSkeleton {
+  private final Pattern pattern;
+  private volatile boolean matched;
+
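+  // The pattern is hard-coded to match any message containing "FakeMetric".
+  // Tests attach this appender via the PATTERNMATCHERAPPENDER entry in the
+  // test log4j.properties and then poll isMatched().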
+  public PatternMatchingAppender() {
+    this.pattern = Pattern.compile("^.*FakeMetric.*$");
+    this.matched = false;
+  }
+
+  public boolean isMatched() {
+    return matched;
+  }
+
+  @Override
+  protected void append(LoggingEvent event) {
+    if (pattern.matcher(event.getMessage().toString()).matches()) {
+      matched = true;
+    }
+  }
+
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public boolean requiresLayout() {
+    return false;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 617f38a..c00649a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.security.authorize.ProxyServers;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.Lists;
 
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index fec16c1..d34d6ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 953d1ef..0f73669 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -24,6 +24,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -38,9 +39,12 @@
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
+import org.apache.log4j.Logger;
 
 import org.junit.After;
 import org.junit.AfterClass;
@@ -103,7 +107,6 @@
   UserGroupInformation userGroupInfo;
 
   @Before
-  @SuppressWarnings("unchecked")
   public void setupCluster() throws Exception {
     // must configure prior to instantiating the namesystem because it
     // will reconfigure the logger if async is enabled
@@ -119,9 +122,11 @@
     util.createFiles(fs, fileName);
 
     // make sure the appender is what it's supposed to be
-    assertTrue(Collections.list(org.apache.log4j.Logger.getLogger(
-            "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders())
-        .get(0) instanceof org.apache.log4j.AsyncAppender);
+    Logger logger = org.apache.log4j.Logger.getLogger(
+        "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    assertTrue(appenders.get(0) instanceof AsyncAppender);
     
     fnames = util.getFileNames(fileName);
     util.waitReplication(fs, fileName, (short)3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index ccc6be3..d675dcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -82,7 +82,7 @@
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
@@ -863,7 +863,7 @@
         savedSd = sd;
       }
       
-      LogCapturer logs = LogCapturer.captureLogs(
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
           LoggerFactory.getLogger(Storage.class));
       try {
         // try to lock the storage that's already locked
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 73aee34..771caef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -49,7 +49,7 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index c68ad18..17803a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -83,7 +83,6 @@
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
@@ -91,6 +90,9 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -1715,13 +1717,36 @@
     }
   }
 
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
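+    // Return a copy so callers can iterate while logging continues.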
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<>(log);
+    }
+  }
+
   /**
    *
    * @throws Exception
    */
   @Test
   public void testReadActivelyUpdatedLog() throws Exception {
-    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final TestAppender appender = new TestAppender();
+    LogManager.getRootLogger().addAppender(appender);
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     // Set single handler thread, so all transactions hit same thread-local ops.
@@ -1769,16 +1794,21 @@
       rwf.close();
 
       events.poll();
-      for (String logLine : logCapturer.getOutput().split("\n")) {
-        if (logLine != null && logLine.contains("Caught exception after reading")) {
+      String pattern = "Caught exception after reading (.*) ops";
+      Pattern r = Pattern.compile(pattern);
+      final List<LoggingEvent> log = appender.getLog();
+      for (LoggingEvent event : log) {
+        Matcher m = r.matcher(event.getRenderedMessage());
+        if (m.find()) {
           fail("Should not try to read past latest syned edit log op");
         }
       }
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
-      logCapturer.stopCapturing();
+      LogManager.getRootLogger().removeAppender(appender);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index fb484cd..3b15c2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -26,8 +26,6 @@
 
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -120,8 +118,8 @@
     op3.setTransactionId(3);
     buffer.writeOp(op3, fakeLogVersion);
 
-    LogCapturer logs =
-        LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
+    GenericTestUtils.LogCapturer logs =
+        GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
     try {
       buffer.close();
       fail();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 860e6b0..89193ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -64,7 +64,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.FakeTimer;
 import org.slf4j.event.Level;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index afb0491..f0ae181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.FakeTimer;
 import org.apache.hadoop.util.Time;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
index 08c9240..9c77f9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
@@ -29,8 +29,6 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -60,7 +58,7 @@
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private UserGroupInformation userGroupInfo;
-  private LogCapturer logs;
+  private GenericTestUtils.LogCapturer logs;
 
   @Before
   public void setUp() throws Exception {
@@ -78,7 +76,7 @@
     userGroupInfo = UserGroupInformation.createUserForTesting("bob",
         new String[] {"hadoop"});
 
-    logs = LogCapturer.captureLogs(FSNamesystem.LOG);
+    logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG);
     GenericTestUtils
         .setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()),
         org.slf4j.event.Level.INFO);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 96650a4..a312b03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -115,7 +115,7 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index 651d4f3..464fdfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -18,13 +18,15 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.util.function.Supplier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
 
 import org.junit.Rule;
 import org.junit.Test;
@@ -32,6 +34,7 @@
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -61,12 +64,12 @@
   }
 
   @Test
-  @SuppressWarnings("unchecked")
   public void testMetricsLoggerIsAsync() throws IOException {
     makeNameNode(true);
     org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
-    assertTrue(Collections.list(logger.getAllAppenders()).get(0)
-        instanceof org.apache.log4j.AsyncAppender);
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    assertTrue(appenders.get(0) instanceof AsyncAppender);
   }
 
   /**
@@ -77,14 +80,20 @@
   public void testMetricsLogOutput()
       throws IOException, InterruptedException, TimeoutException {
     TestFakeMetric metricsProvider = new TestFakeMetric();
-    MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider);
+    MBeans.register(this.getClass().getSimpleName(),
+        "DummyMetrics", metricsProvider);
     makeNameNode(true);     // Log metrics early and often.
-    LogCapturer logCapturer =
-        LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME));
+    final PatternMatchingAppender appender =
+        (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME)
+            .getAppender("PATTERNMATCHERAPPENDER");
 
-    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
-        1000, 60000);
-    logCapturer.stopCapturing();
+    // Ensure that the supplied pattern was matched.
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return appender.isMatched();
+      }
+    }, 1000, 60000);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
index 8750154..073ee37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
@@ -28,8 +28,7 @@
 import org.junit.Test;
 
 import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
 public class TestNameNodeResourcePolicy {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 7ea0b24..67c8f3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -68,12 +69,12 @@
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -523,8 +524,10 @@
         // Corrupt the md5 files in all the namedirs
         corruptFSImageMD5(true);
 
-      // Attach our own log appender so we can verify output
-      LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+        // Attach our own log appender so we can verify output
+        final LogVerificationAppender appender = new LogVerificationAppender();
+        final Logger logger = Logger.getRootLogger();
+        logger.addAppender(appender);
 
         // Try to start a new cluster
         LOG.info("\n===========================================\n" +
@@ -538,13 +541,10 @@
         } catch (IOException ioe) {
           GenericTestUtils.assertExceptionContains(
               "Failed to load FSImage file", ioe);
-
-          int md5failures =
-              org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
-                  " is corrupt with MD5 checksum of ");
+          int md5failures = appender.countExceptionsWithMessage(
+              " is corrupt with MD5 checksum of ");
           // Two namedirs, so should have seen two failures
           assertEquals(2, md5failures);
-          logCapturer.stopCapturing();
         }
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 7376237..0e83bec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -43,7 +43,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -197,7 +197,7 @@
 
     // Trying to bootstrap standby should now fail since the edit
     // logs aren't available in the shared dir.
-    LogCapturer logs = LogCapturer.captureLogs(
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
         LoggerFactory.getLogger(BootstrapStandby.class));
     try {
       assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 6fa979d..1682731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -44,7 +44,6 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.After;
 import org.junit.Before;
@@ -144,7 +143,7 @@
         () -> (DistributedFileSystem) FileSystem.get(conf));
 
     GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG);
-    LogCapturer logCapture = LogCapturer
+    GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer
         .captureLogs(ObserverReadProxyProvider.LOG);
     try {
       dfs.access(new Path("/"), FsAction.READ);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 3dbadca..513f60c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -47,12 +48,12 @@
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.ipc.StandbyException;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ThreadUtil;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -298,38 +299,39 @@
   @Test(timeout = 30000)
   public void testCheckpointBeforeNameNodeInitializationIsComplete()
       throws Exception {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    final LogVerificationAppender appender = new LogVerificationAppender();
+    final org.apache.log4j.Logger logger = org.apache.log4j.Logger
+        .getRootLogger();
+    logger.addAppender(appender);
 
-    try {
-      // Transition 2 to observer
-      cluster.transitionToObserver(2);
-      doEdits(0, 10);
-      // After a rollEditLog, Standby(nn1)'s next checkpoint would be
-      // ahead of observer(nn2).
-      nns[0].getRpcServer().rollEditLog();
+    // Transition 2 to observer
+    cluster.transitionToObserver(2);
+    doEdits(0, 10);
+    // After a rollEditLog, Standby(nn1)'s next checkpoint would be
+    // ahead of observer(nn2).
+    nns[0].getRpcServer().rollEditLog();
 
-      NameNode nn2 = nns[2];
-      FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
+    NameNode nn2 = nns[2];
+    FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
 
-      // After standby creating a checkpoint, it will try to push the image to
-      // active and all observer, updating it's own txid to the most recent.
-      HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
-      HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
+    // After the standby creates a checkpoint, it tries to push the image to
+    // the active and all observers, updating its own txid to the most recent.
+    HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
+    HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
 
-      NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
-      cluster.transitionToStandby(2);
+    NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
+    cluster.transitionToStandby(2);
+    logger.removeAppender(appender);
 
-      for (String logLine : logCapturer.getOutput().split("\n")) {
-        if (logLine != null && logLine.contains("PutImage failed") && logLine.contains(
-            "FSImage has not been set in the NameNode.")) {
-          //Logs have the expected exception.
-          return;
-        }
+    for (LoggingEvent event : appender.getLog()) {
+      String message = event.getRenderedMessage();
+      if (message.contains("PutImage failed") &&
+          message.contains("FSImage has not been set in the NameNode.")) {
+        //Logs have the expected exception.
+        return;
       }
-      fail("Expected exception not present in logs.");
-    } finally {
-      logCapturer.stopCapturing();
     }
+    fail("Expected exception not present in logs.");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 3741bbf..58d72f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -93,7 +93,7 @@
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.junit.After;
@@ -1372,7 +1372,7 @@
       Path filePath = new Path("/zeroSizeFile");
       DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
       fs.setReplication(filePath, (short) 3);
-      LogCapturer logs = LogCapturer.captureLogs(
+      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
           LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class));
       fs.setStoragePolicy(filePath, "COLD");
       fs.satisfyStoragePolicy(filePath);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index b739b25..368deef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -22,6 +22,9 @@
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
 
+# Only to be used for testing
+log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender
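+# Tests such as TestNameNodeMetricsLogger look this appender up by its name
+# ("PATTERNMATCHERAPPENDER") on the metrics loggers configured below.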
+
 #
 # NameNode metrics logging.
 # The default is to retain two namenode-metrics.log files up to 64MB each.
@@ -29,10 +32,10 @@
 
 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
 # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
-namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA
+namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER
 log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
 log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
+log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
 log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
 log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB
 log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log
@@ -45,10 +48,10 @@
 
 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
 # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
-datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA
+datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER
 log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
 log4j.additivity.DataNodeMetricsLog=false
-log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
+log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
 log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
 log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB
 log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log
@@ -69,7 +72,7 @@
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
 log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender
+log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender
 log4j.appender.ASYNCAUDITAPPENDER.blocking=false
 log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256
 log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index 142c1ab..e3b3511 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -124,12 +124,6 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index cb5f3ed..15682ee 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -36,10 +36,9 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
-
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent;
 import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
 import org.junit.After;
@@ -108,10 +107,12 @@
 import org.apache.hadoop.yarn.util.ControlledClock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
@@ -127,6 +128,29 @@
     }
   }
 
+  private static class TestAppender extends AppenderSkeleton {
+
+    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
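+    // CopyOnWriteArrayList: events may arrive from dispatcher threads while
+    // the test thread iterates over getLogEvents().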
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    protected void append(LoggingEvent arg0) {
+      logEvents.add(arg0);
+    }
+
+    private List<LoggingEvent> getLogEvents() {
+      return logEvents;
+    }
+  }
+
   @BeforeClass
   public static void setupBeforeClass() {
     ResourceUtils.resetResourceTypes(new Configuration());
@@ -1700,10 +1724,11 @@
     for (String memoryName : ImmutableList.of(
         MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
         MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
-      final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
-      LogCapturer logCapturer = LogCapturer.captureLogs(logger);
+      TestAppender testAppender = new TestAppender();
+      final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
       try {
         TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear();
+        logger.addAppender(testAppender);
         EventHandler eventHandler = mock(EventHandler.class);
         Clock clock = SystemClock.getInstance();
         JobConf jobConf = new JobConf();
@@ -1716,11 +1741,13 @@
             getResourceInfoFromContainerRequest(taImpl, eventHandler).
             getMemorySize();
         assertEquals(3072, memorySize);
-        assertTrue(logCapturer.getOutput().contains(
-            "Configuration " + "mapreduce.reduce.resource." + memoryName + "=3Gi is "
-                + "overriding the mapreduce.reduce.memory.mb=2048 configuration"));
+        assertTrue(testAppender.getLogEvents().stream()
+            .anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " +
+                "mapreduce.reduce.resource." + memoryName + "=3Gi is " +
+                "overriding the mapreduce.reduce.memory.mb=2048 configuration")
+                    .equals(e.getMessage())));
       } finally {
-        logCapturer.stopCapturing();
+        logger.removeAppender(testAppender);
       }
     }
   }
@@ -1782,9 +1809,10 @@
 
   @Test
   public void testReducerCpuRequestOverriding() {
-    final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
-    final LogCapturer logCapturer = LogCapturer.captureLogs(logger);
+    TestAppender testAppender = new TestAppender();
+    final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
     try {
+      logger.addAppender(testAppender);
       EventHandler eventHandler = mock(EventHandler.class);
       Clock clock = SystemClock.getInstance();
       JobConf jobConf = new JobConf();
@@ -1797,11 +1825,13 @@
           getResourceInfoFromContainerRequest(taImpl, eventHandler).
           getVirtualCores();
       assertEquals(7, vCores);
-      assertTrue(logCapturer.getOutput().contains(
-          "Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the "
-              + "mapreduce.reduce.cpu.vcores=9 configuration"));
+      assertTrue(testAppender.getLogEvents().stream().anyMatch(
+          e -> e.getLevel() == Level.WARN && ("Configuration " +
+              "mapreduce.reduce.resource.vcores=7 is overriding the " +
+              "mapreduce.reduce.cpu.vcores=9 configuration").equals(
+                  e.getMessage())));
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(testAppender);
     }
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index d124c97..7530428 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -72,12 +72,6 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index 43ab170..a0223de 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -23,10 +23,12 @@
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.Flushable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
+import java.util.Enumeration;
 import java.util.List;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
@@ -42,13 +44,16 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.util.ProcessTree;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.log4j.Appender;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
@@ -271,7 +276,42 @@
     }
 
     // flush & close all appenders
-    HadoopLoggerUtils.shutdownLogManager();
+    LogManager.shutdown();
+  }
+
+  @SuppressWarnings("unchecked")
+  public static synchronized void syncLogs() {
+    // flush standard streams
+    //
+    System.out.flush();
+    System.err.flush();
+
+    // flush flushable appenders
+    //
+    final Logger rootLogger = Logger.getRootLogger();
+    flushAppenders(rootLogger);
+    final Enumeration<Logger> allLoggers =
+        rootLogger.getLoggerRepository().getCurrentLoggers();
+    while (allLoggers.hasMoreElements()) {
+      final Logger l = allLoggers.nextElement();
+      flushAppenders(l);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private static void flushAppenders(Logger l) {
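+    // log4j 1.x Appender has no flush() of its own, so only appenders that
+    // also implement java.io.Flushable can be flushed here.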
+    final Enumeration<Appender> allAppenders = l.getAllAppenders();
+    while (allAppenders.hasMoreElements()) {
+      final Appender a = allAppenders.nextElement();
+      if (a instanceof Flushable) {
+        try {
+          ((Flushable) a).flush();
+        } catch (IOException ioe) {
+          System.err.println(a + ": Failed to flush! "
+            + StringUtils.stringifyException(ioe));
+        }
+      }
+    }
   }
 
   public static ScheduledExecutorService createLogSyncer() {
@@ -296,7 +336,7 @@
         new Runnable() {
           @Override
           public void run() {
-            HadoopLoggerUtils.syncLogs();
+            TaskLog.syncLogs();
           }
         }, 0L, 5L, TimeUnit.SECONDS);
     return scheduler;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
index f83835f..e91b4c1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
@@ -28,19 +28,24 @@
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.LineNumberReader;
+import java.io.StringReader;
 
 import org.junit.Before;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.mapred.TaskReport;
 import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.WriterAppender;
 import org.mockito.stubbing.Answer;
-import org.slf4j.LoggerFactory;
 
 /**
  * Test to make sure that command line output for 
@@ -68,53 +73,55 @@
 
   @Test
   public void testJobMonitorAndPrint() throws Exception {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class));
-    try {
-      JobStatus jobStatus_1 =
-          new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING,
-              JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url",
-              true);
-      JobStatus jobStatus_2 =
-          new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH,
-              "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true);
+    JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
+        0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
+        "tmp-queue", "tmp-jobfile", "tmp-url", true);
+    JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
+        1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
+        "tmp-queue", "tmp-jobfile", "tmp-url", true);
 
-      doAnswer((Answer<TaskCompletionEvent[]>) invocation -> TaskCompletionEvent.EMPTY_ARRAY).when(
-          job).getTaskCompletionEvents(anyInt(), anyInt());
+    doAnswer((Answer<TaskCompletionEvent[]>) invocation ->
+        TaskCompletionEvent.EMPTY_ARRAY).when(job)
+        .getTaskCompletionEvents(anyInt(), anyInt());
 
-      doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
-      when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
+    doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
+    when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
+    // setup the logger to capture all logs
+    Layout layout =
+        Logger.getRootLogger().getAppender("stdout").getLayout();
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    WriterAppender appender = new WriterAppender(layout, os);
+    appender.setThreshold(Level.ALL);
+    Logger qlogger = Logger.getLogger(Job.class);
+    qlogger.addAppender(appender);
 
-      job.monitorAndPrintJob();
+    job.monitorAndPrintJob();
 
-      boolean foundHundred = false;
-      boolean foundComplete = false;
-      boolean foundUber = false;
-      String uberModeMatch = "uber mode : true";
-      String progressMatch = "map 100% reduce 100%";
-      String completionMatch = "completed successfully";
-      for (String logLine : logCapturer.getOutput().split("\n")) {
-        if (logLine.contains(uberModeMatch)) {
-          foundUber = true;
-        }
-        if (logLine.contains(progressMatch)) {
-          foundHundred = true;
-        }
-        if (logLine.contains(completionMatch)) {
-          foundComplete = true;
-        }
-        if (foundUber && foundHundred && foundComplete) {
-          break;
-        }
+    qlogger.removeAppender(appender);
+    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
+    String line;
+    boolean foundHundred = false;
+    boolean foundComplete = false;
+    boolean foundUber = false;
+    String uberModeMatch = "uber mode : true";
+    String progressMatch = "map 100% reduce 100%";
+    String completionMatch = "completed successfully";
+    while ((line = r.readLine()) != null) {
+      if (line.contains(uberModeMatch)) {
+        foundUber = true;
       }
-      assertTrue(foundUber);
-      assertTrue(foundHundred);
-      assertTrue(foundComplete);
-
-      System.out.println("The output of job.toString() is : \n" + job.toString());
-      assertTrue(job.toString().contains("Number of maps: 5\n"));
-      assertTrue(job.toString().contains("Number of reduces: 5\n"));
-    } finally {
-      logCapturer.stopCapturing();
+      foundHundred = line.contains(progressMatch);
+      if (foundHundred) {
+        break;
+      }
     }
+    line = r.readLine();
+    foundComplete = line != null && line.contains(completionMatch);
+    assertTrue(foundUber);
+    assertTrue(foundHundred);
+    assertTrue(foundComplete);
+
+    System.out.println("The output of job.toString() is : \n" + job.toString());
+    assertTrue(job.toString().contains("Number of maps: 5\n"));
+    assertTrue(job.toString().contains("Number of reduces: 5\n"));
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 632e972..17358a3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -128,12 +128,6 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
   </dependencies>
 
  <profiles>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 063f185..0bdc721 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -34,6 +34,7 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -44,6 +45,7 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
@@ -53,7 +55,6 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobPriority;
 import org.apache.hadoop.mapreduce.JobStatus.State;
@@ -109,6 +110,13 @@
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -136,6 +144,29 @@
           MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%"));
   private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource";
 
+  private static class TestAppender extends AppenderSkeleton {
+
+    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
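+    // Thread-safe list: assertions read the events while YARNRunner may
+    // still be logging from other threads.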
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    protected void append(LoggingEvent arg0) {
+      logEvents.add(arg0);
+    }
+
+    private List<LoggingEvent> getLogEvents() {
+      return logEvents;
+    }
+  }
+
   private YARNRunner yarnRunner;
   private ResourceMgrDelegate resourceMgrDelegate;
   private YarnConfiguration conf;
@@ -518,48 +549,38 @@
       assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
     }
   }
-
   @Test(timeout=20000)
   public void testWarnCommandOpts() throws Exception {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
-    try {
-      JobConf jobConf = new JobConf();
-
-      jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
-          "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
-      jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
-
-      YARNRunner yarnRunner = new YARNRunner(jobConf);
-
-      @SuppressWarnings("unused")
-      ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf);
-
-      boolean isFoundOne = false;
-      boolean isFoundTwo = false;
-      for (String logLine : logCapturer.getOutput().split("\n")) {
-        if (logLine == null) {
-          continue;
-        }
-        if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
-            + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no "
-            + "longer function if hadoop native libraries are used. These values "
-            + "should be set as part of the LD_LIBRARY_PATH in the app master JVM "
-            + "env using yarn.app.mapreduce.am.admin.user.env config settings.")) {
-          isFoundOne = true;
-        }
-        if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
-            + "yarn.app.mapreduce.am.command-opts can cause programs to no longer "
-            + "function if hadoop native libraries are used. These values should "
-            + "be set as part of the LD_LIBRARY_PATH in the app master JVM env "
-            + "using yarn.app.mapreduce.am.env config settings.")) {
-          isFoundTwo = true;
-        }
-      }
-      assertTrue(isFoundOne);
-      assertTrue(isFoundTwo);
-    } finally {
-      logCapturer.stopCapturing();
-    }
+    org.apache.log4j.Logger logger =
+        org.apache.log4j.Logger.getLogger(YARNRunner.class);
+
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    Layout layout = new SimpleLayout();
+    Appender appender = new WriterAppender(layout, bout);
+    logger.addAppender(appender);
+
+    JobConf jobConf = new JobConf();
+
+    jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
+        "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
+    jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS,
+        "-Xmx1024m -Djava.library.path=bar");
+
+    YARNRunner yarnRunner = new YARNRunner(jobConf);
+
+    @SuppressWarnings("unused")
+    ApplicationSubmissionContext submissionContext =
+        buildSubmitContext(yarnRunner, jobConf);
+
+    String logMsg = bout.toString();
+    assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + 
+    		"yarn.app.mapreduce.am.admin-command-opts can cause programs to no " +
+        "longer function if hadoop native libraries are used. These values " + 
+    		"should be set as part of the LD_LIBRARY_PATH in the app master JVM " +
+        "env using yarn.app.mapreduce.am.admin.user.env config settings."));
+    assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + 
+        "yarn.app.mapreduce.am.command-opts can cause programs to no longer " +
+        "function if hadoop native libraries are used. These values should " +
+        "be set as part of the LD_LIBRARY_PATH in the app master JVM env " +
+        "using yarn.app.mapreduce.am.env config settings."));
   }
 
   @Test(timeout=20000)
@@ -975,7 +996,10 @@
     for (String memoryName : ImmutableList.of(
         MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
         MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
-      LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
+      TestAppender testAppender = new TestAppender();
+      org.apache.log4j.Logger logger =
+          org.apache.log4j.Logger.getLogger(YARNRunner.class);
+      logger.addAppender(testAppender);
       try {
         JobConf jobConf = new JobConf();
         jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi");
@@ -993,17 +1017,13 @@
 
         long memorySize = resourceRequest.getCapability().getMemorySize();
         Assert.assertEquals(3072, memorySize);
-        boolean isLogFound = false;
-        for (String logLine : logCapturer.getOutput().split("\n")) {
-          if (logLine != null && logLine.contains("WARN") && logLine.contains(
-              "Configuration " + "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is "
-                  + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) {
-            isLogFound = true;
-          }
-        }
-        assertTrue("Log line could not be found", isLogFound);
+        assertTrue(testAppender.getLogEvents().stream().anyMatch(
+            e -> e.getLevel() == Level.WARN && ("Configuration " +
+                "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " +
+                "overriding the yarn.app.mapreduce.am.resource.mb=2048 " +
+                "configuration").equals(e.getMessage())));
       } finally {
-        logCapturer.stopCapturing();
+        logger.removeAppender(testAppender);
       }
     }
   }
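
Note: the two hunks above replace the slf4j-backed LogCapturer with direct log4j 1.x
appender wiring. The underlying pattern is small enough to state on its own: attach a
WriterAppender over an in-memory stream, assert on the rendered text, and detach in a
finally block. A self-contained sketch, with an illustrative class name that is not
part of the patch:

    import java.io.ByteArrayOutputStream;

    import org.apache.log4j.Logger;
    import org.apache.log4j.SimpleLayout;
    import org.apache.log4j.WriterAppender;

    public class LogCaptureSketch {
      public static void main(String[] args) {
        Logger logger = Logger.getLogger(LogCaptureSketch.class);
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        // SimpleLayout renders events as "LEVEL - message", which is why the
        // assertions above match on a "WARN - " prefix.
        WriterAppender appender = new WriterAppender(new SimpleLayout(), bout);
        logger.addAppender(appender);
        try {
          logger.warn("something worth asserting on");
          assert bout.toString().contains("WARN - something worth asserting on");
        } finally {
          // Detach so the appender does not leak into later tests.
          logger.removeAppender(appender);
        }
      }
    }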
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
index cc93e56..338f117 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
@@ -29,6 +29,8 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.HadoopTestCase;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.log4j.Level;
+import org.junit.Before;
 import org.junit.Test;
 
 import static org.junit.Assert.assertTrue;
@@ -74,10 +76,12 @@
                      mapJavaOpts, 
                      mapJavaOpts, MAP_OPTS_VAL);
       }
-
-      String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO");
-      assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
-          "OFF");
+      
+      Level logLevel =
+        Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL,
+                               Level.INFO.toString()));
+      assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + " has value of " +
+                   logLevel, logLevel, Level.OFF);
     }
   }
   
@@ -104,10 +108,12 @@
                      reduceJavaOpts, 
                      reduceJavaOpts, REDUCE_OPTS_VAL);
       }
-
-      String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO");
-      assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
-          "OFF");
+      
+      Level logLevel =
+        Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL,
+                               Level.INFO.toString()));
+      assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + " has value of " +
+                   logLevel, logLevel, Level.OFF);
     }
   }
   
@@ -121,9 +127,9 @@
       conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL);
       conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL);
     }
-
-    conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF");
-    conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF");
+    
+    conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString());
+    conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString());
     
     Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 
                 numMaps, numReds);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
index d1fc8c0..9e58d46 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
@@ -25,7 +25,6 @@
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Assert;
@@ -51,6 +50,8 @@
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -63,7 +64,8 @@
   @Test
   public void testDelegationToken() throws Exception {
 
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
 
     final YarnConfiguration conf = new YarnConfiguration(new JobConf());
     // Just a random principal
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index f653ce7..43d3abe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -99,6 +99,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -556,9 +557,9 @@
           systemClasses);
     }
     sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
-    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
-    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
-    sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
     sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
     final SleepJob sleepJob = new SleepJob();
     sleepJob.setConf(sleepConf);
@@ -855,11 +856,11 @@
 
     final SleepJob sleepJob = new SleepJob();
     final JobConf sleepConf = new JobConf(mrCluster.getConfig());
-    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
     final long userLogKb = 4;
     sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
     sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
-    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
     final long amLogKb = 7;
     sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
     sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3ebab5a..c4dfd2f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1944,18 +1944,6 @@
         <artifactId>log4j-web</artifactId>
         <version>${log4j2.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-logging</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-logging</artifactId>
-        <version>${hadoop.version}</version>
-        <scope>test</scope>
-        <type>test-jar</type>
-      </dependency>
     </dependencies>
   </dependencyManagement>
 
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 373b5a0..e8c5fb7 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -349,12 +349,7 @@
       <artifactId>hamcrest-library</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
+
   </dependencies>
 
   <profiles>
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
index 2a124c1..1e7330f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
@@ -32,7 +32,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
index 6acab8f..476d7a4 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
@@ -23,7 +23,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
 import org.junit.Test;
 import org.slf4j.Logger;
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 06c2e19..5194e51 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -83,12 +83,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
       <exclusions>
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index d54fbaa..aa42cb9 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -41,7 +41,6 @@
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.tools.DistCpConstants;
@@ -702,8 +701,8 @@
     GenericTestUtils
         .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth());
 
-    LogCapturer log =
-        LogCapturer.captureLogs(SimpleCopyListing.LOG);
+    GenericTestUtils.LogCapturer log =
+        GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);
 
     String options = "-useiterator -update -delete" + getDefaultCLIOptions();
     DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
index 661573f..02fd48a 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
@@ -27,10 +27,11 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.tools.rumen.datatypes.*;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 
 /**
  * A default parser for MapReduce job configuration properties.
@@ -82,7 +83,7 @@
   
   // turn off the warning w.r.t deprecated mapreduce keys
   static {
-    HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF");
+    Logger.getLogger(Configuration.class).setLevel(Level.OFF);
   }
     
   // Accepts a key if there is a corresponding key in the current mapreduce
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index d901513..81e8884 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -576,6 +576,16 @@
     <Bug pattern="SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING" />
   </Match>
   
+  <!-- The following fields are used in ErrorsAndWarningsBlock, which is not part of the findbugs analysis -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender$Element" />
+    <Or>
+      <Field name="count" />
+      <Field name="timestampSeconds" />
+    </Or>
+    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
+  </Match>
+
   <Match>
     <Class name="org.apache.hadoop.yarn.api.records.ResourceRequest" />
     <Method name="equals" />
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b41923e..a15c78e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -63,7 +63,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -127,6 +126,7 @@
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.apache.log4j.LogManager;
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -403,7 +403,7 @@
       result = appMaster.finish();
     } catch (Throwable t) {
       LOG.error("Error running ApplicationMaster", t);
-      HadoopLoggerUtils.shutdownLogManager();
+      LogManager.shutdown();
       ExitUtil.terminate(1, t);
     } finally {
       if (appMaster != null) {
@@ -529,7 +529,7 @@
     //Check whether customer log4j.properties file exists
     if (fileExist(log4jPath)) {
       try {
-        HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class,
+        Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class,
             log4jPath);
       } catch (Exception e) {
         LOG.warn("Can not set up custom log4j properties. " + e);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index dc23682..098f398 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -52,7 +52,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -452,7 +451,7 @@
     if (cliParser.hasOption("log_properties")) {
       String log4jPath = cliParser.getOptionValue("log_properties");
       try {
-        HadoopLoggerUtils.updateLog4jConfiguration(Client.class, log4jPath);
+        Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
       } catch (Exception e) {
         LOG.warn("Can not set up custom log4j properties. " + e);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
new file mode 100644
index 0000000..0301a68
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.applications.distributedshell;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map.Entry;
+import java.util.Properties;
+
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PropertyConfigurator;
+
+public class Log4jPropertyHelper {
+
+  public static void updateLog4jConfiguration(Class<?> targetClass,
+      String log4jPath) throws Exception {
+    Properties customProperties = new Properties();
+    try (
+        FileInputStream fs = new FileInputStream(log4jPath);
+        InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
+      customProperties.load(fs);
+      Properties originalProperties = new Properties();
+      originalProperties.load(is);
+      for (Entry<Object, Object> entry : customProperties.entrySet()) {
+        originalProperties.setProperty(entry.getKey().toString(), entry
+            .getValue().toString());
+      }
+      LogManager.resetConfiguration();
+      PropertyConfigurator.configure(originalProperties);
+    }
+  }
+}
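
Note: as the ApplicationMaster and Client hunks above show, this restored helper backs
the distributed shell's -log_properties option: it overlays the user-supplied
properties on the defaults bundled at /log4j.properties, then resets the LogManager
and reapplies the merged set through PropertyConfigurator. A minimal caller sketch,
with an illustrative path that is not taken from the patch:

    // Hypothetical usage: merge custom log4j settings over the bundled
    // defaults and reconfigure the running log4j 1.x setup in place.
    String log4jPath = "/tmp/custom-log4j.properties";  // illustrative only
    try {
      Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
    } catch (Exception e) {
      System.err.println("Can not set up custom log4j properties: " + e);
    }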
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
index 5534653..60c06e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
+import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -51,8 +52,6 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Test Spnego Client Login.
@@ -77,7 +76,8 @@
 
   private Map<String, String> props;
   private static Server server;
-  private static Logger LOG = LoggerFactory.getLogger(TestSecureApiServiceClient.class);
+  private static Logger LOG = Logger
+      .getLogger(TestSecureApiServiceClient.class);
   private ApiServiceClient asc;
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
index 52ae876..f8f948d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
+import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -48,6 +49,8 @@
  */
 public class TestComponent {
 
+  static final Logger LOG = Logger.getLogger(TestComponent.class);
+
   @Rule
   public ServiceTestUtils.ServiceFSWatcher rule =
       new ServiceTestUtils.ServiceFSWatcher();
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
similarity index 93%
rename from hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
index cf7a2bf..fffc8a8 100644
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
@@ -16,10 +16,12 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.logging.appenders;
+package org.apache.hadoop.yarn.util;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -111,13 +113,16 @@
   /**
    * Create an appender to keep track of the errors and warnings logged by the
    * system.
-   *
-   * @param cleanupIntervalSeconds the interval at which old messages are purged to prevent the
-   * message stores from growing unbounded.
-   * @param messageAgeLimitSeconds the maximum age of a message in seconds before it is purged from
-   * the store.
-   * @param maxUniqueMessages the maximum number of unique messages of each type we keep before
-   * we start purging.
+   * 
+   * @param cleanupIntervalSeconds
+   *          the interval at which old messages are purged to prevent the
+   *          message stores from growing unbounded
+   * @param messageAgeLimitSeconds
+   *          the maximum age of a message in seconds before it is purged from
+   *          the store
+   * @param maxUniqueMessages
+   *          the maximum number of unique messages of each type we keep before
+   *          we start purging
    */
   public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds,
       long messageAgeLimitSeconds, int maxUniqueMessages) {
@@ -138,20 +143,6 @@
     this.setThreshold(Level.WARN);
   }
 
-  private static String join(CharSequence separator, String[] strings) {
-    StringBuilder sb = new StringBuilder();
-    boolean first = true;
-    for (String s : strings) {
-      if (first) {
-        first = false;
-      } else {
-        sb.append(separator);
-      }
-      sb.append(s);
-    }
-    return sb.toString();
-  }
-
   /**
    * {@inheritDoc}
    */
@@ -160,7 +151,7 @@
     String message = event.getRenderedMessage();
     String[] throwableStr = event.getThrowableStrRep();
     if (throwableStr != null) {
-      message = message + "\n" + join("\n", throwableStr);
+      message = message + "\n" + StringUtils.join("\n", throwableStr);
       message =
           org.apache.commons.lang3.StringUtils.left(message, MAX_MESSAGE_SIZE);
     }
@@ -241,7 +232,7 @@
    * getErrorMessagesAndCounts since the message store is purged at regular
    * intervals to prevent it from growing without bounds, while the store for
    * the counts is purged less frequently.
-   *
+   * 
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -257,7 +248,7 @@
    * getWarningMessagesAndCounts since the message store is purged at regular
    * intervals to prevent it from growing without bounds, while the store for
    * the counts is purged less frequently.
-   *
+   * 
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -294,7 +285,7 @@
    * differ from the ones provided by getErrorCounts since the message store is
    * purged at regular intervals to prevent it from growing without bounds,
    * while the store for the counts is purged less frequently.
-   *
+   * 
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -313,7 +304,7 @@
    * may differ from the ones provided by getWarningCounts since the message
    * store is purged at regular intervals to prevent it from growing without
    * bounds, while the store for the counts is purged less frequently.
-   *
+   * 
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -331,7 +322,7 @@
       SortedSet<PurgeElement> purgeInformation) {
     if (purgeInformation.size() > maxUniqueMessages) {
       ErrorAndWarningsCleanup cleanup = new ErrorAndWarningsCleanup();
-      long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000);
+      long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
       cutoff = (cutoff / 1000);
       cleanup.cleanupMessages(map, purgeInformation, cutoff, maxUniqueMessages);
     }
@@ -388,7 +379,7 @@
 
     @Override
     public void run() {
-      long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000);
+      long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
       cutoff = (cutoff / 1000);
       cleanupMessages(errors, errorsPurgeInformation, cutoff, maxUniqueMessages);
       cleanupMessages(warnings, warningsPurgeInformation, cutoff,
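
Note: the appender moved back above is attached programmatically rather than via a
properties file. A minimal sketch of wiring it to the log4j 1.x root logger, using the
constructor documented in the hunk above; the interval values are illustrative, not
taken from the patch:

    import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
    import org.apache.log4j.Logger;

    // Track WARN and ERROR events process-wide (the constructor sets the
    // threshold to WARN): purge stores every 60s, drop messages older than
    // 300s, and keep at most 25 unique messages per level before purging.
    Logger.getRootLogger().addAppender(
        new Log4jWarningErrorMetricsAppender(60, 300, 25));

The ErrorsAndWarningsBlock and NavBlock changes below consume whatever instance is
attached, which is why the findbugs exclusion for its Element fields was re-added
earlier in this patch.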
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
index 4fc87d9..fa5a587 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
@@ -41,6 +40,7 @@
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcase;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder.AppDescriptor;
+import org.apache.log4j.Level;
 
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT;
 import static org.apache.hadoop.yarn.logaggregation.LogAggregationTestUtils.enableFileControllers;
@@ -67,7 +67,7 @@
 
   @BeforeAll
   public static void beforeClass() {
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);
   }
 
   @BeforeEach
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 0fd2841..346239f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -28,7 +28,6 @@
 import org.slf4j.Marker;
 import org.slf4j.MarkerFactory;
 
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index c04fba0..6b0570a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
index 05031ad..4128546 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.util.GenericsUtil;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index 8e24e8c..87d511b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -20,7 +20,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
index 12b6dd7..c849619 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
@@ -50,12 +50,11 @@
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
+import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
@@ -64,7 +63,8 @@
 
 public class TestContainersMonitorResourceChange {
 
-  static final Logger LOG = LoggerFactory.getLogger(TestContainersMonitorResourceChange.class);
+  static final Logger LOG = Logger
+      .getLogger(TestContainersMonitorResourceChange.class);
   private ContainersMonitorImpl containersMonitor;
   private MockExecutor executor;
   private Configuration conf;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 7ea8a62..9d096d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -245,13 +245,6 @@
       <scope>test</scope>
     </dependency>
 
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 80cc9fc..dc69eba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.log4j.Logger;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -41,9 +42,6 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * In-memory mapping between applications/container-tags and nodes/racks.
  * Required by constrained affinity/anti-affinity and cardinality placement.
@@ -52,7 +50,8 @@
 @InterfaceStability.Unstable
 public class AllocationTagsManager {
 
-  private static final Logger LOG = LoggerFactory.getLogger(AllocationTagsManager.class);
+  private static final Logger LOG = Logger.getLogger(
+      AllocationTagsManager.class);
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index 15e2d34..c17d4f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 12b017a..9a85315 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -232,8 +231,8 @@
     conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INITIAL_DELAY, 10);
     conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId());
 
-    LogCapturer logCapture =
-        LogCapturer.captureLogs(FederationStateStoreService.LOG);
+    GenericTestUtils.LogCapturer logCapture =
+        GenericTestUtils.LogCapturer.captureLogs(FederationStateStoreService.LOG);
 
     final MockRM rm = new MockRM(conf);
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index dc2d18d..a1989d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,13 +28,17 @@
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -79,7 +83,6 @@
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.LoggerFactory;
 
 public class TestSystemMetricsPublisherForV2 {
 
@@ -298,15 +301,42 @@
   @Test(timeout = 10000)
   public void testPutEntityWhenNoCollector() throws Exception {
     // Validating the logs as DrainDispatcher won't throw exception
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    class TestAppender extends AppenderSkeleton {
+      private final List<LoggingEvent> log = new ArrayList<>();
+
+      @Override
+      public boolean requiresLayout() {
+        return false;
+      }
+
+      @Override
+      protected void append(final LoggingEvent loggingEvent) {
+        log.add(loggingEvent);
+      }
+
+      @Override
+      public void close() {
+      }
+
+      public List<LoggingEvent> getLog() {
+        return new ArrayList<>(log);
+      }
+    }
+
+    TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
     try {
       RMApp app = createRMApp(ApplicationId.newInstance(0, 1));
       metricsPublisher.appCreated(app, app.getStartTime());
       dispatcher.await();
-      assertFalse("Dispatcher Crashed",
-          logCapturer.getOutput().contains("Error in dispatcher thread"));
+      for (LoggingEvent event : appender.getLog()) {
+        assertFalse("Dispatcher Crashed",
+            event.getRenderedMessage().contains("Error in dispatcher thread"));
+      }
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(appender);
     }
   }
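
Note: this test and TestFairSchedulerConfiguration further down use the
event-capturing variant of the pattern: instead of rendering text through a layout,
the appender stores LoggingEvent objects so assertions can match level and message
independently. A condensed sketch, with an illustrative class name that is not part
of the patch:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.log4j.AppenderSkeleton;
    import org.apache.log4j.Level;
    import org.apache.log4j.spi.LoggingEvent;

    class EventCapturingAppender extends AppenderSkeleton {
      private final List<LoggingEvent> events = new ArrayList<>();

      @Override protected void append(LoggingEvent event) { events.add(event); }
      @Override public boolean requiresLayout() { return false; }
      @Override public void close() { }

      // Mirrors the assertions above: match on level plus exact message.
      boolean sawWarn(String message) {
        return events.stream().anyMatch(
            e -> e.getLevel() == Level.WARN && message.equals(e.getMessage()));
      }
    }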
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
index 07630f5..2e7b01e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
@@ -18,11 +18,12 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.Test;
 import java.io.IOException;
 import java.util.Map;
@@ -156,7 +157,7 @@
 
   @Test
   public void testPreemptionToBalanceWithVcoreResource() throws IOException {
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
@@ -194,7 +195,7 @@
 
   @Test
   public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException {
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    Logger.getRootLogger().setLevel(Level.DEBUG);
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
index c6066fd..024ec86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -16,7 +16,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework;
 
-import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions;
 import org.slf4j.Logger;
@@ -111,7 +110,8 @@
   public void setup() {
     resetResourceInformationMap();
 
-    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+    org.apache.log4j.Logger.getRootLogger().setLevel(
+        org.apache.log4j.Level.DEBUG);
 
     conf = new CapacitySchedulerConfiguration(new Configuration(false));
     conf.setLong(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index c5add68..6aaa15f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -25,10 +25,9 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.log4j.Logger;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static junit.framework.TestCase.fail;
 
@@ -38,7 +37,8 @@
  * the invariant throws in case the invariants are not respected.
  */
 public class TestMetricsInvariantChecker {
-  public final static Logger LOG = LoggerFactory.getLogger(TestMetricsInvariantChecker.class);
+  public final static Logger LOG =
+      Logger.getLogger(TestMetricsInvariantChecker.class);
 
   private MetricsSystem metricsSystem;
   private MetricsInvariantChecker ic;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 68bbc94..38fbcd8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -30,13 +29,19 @@
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
-import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue;
 import static org.junit.Assert.assertEquals;
@@ -49,6 +54,29 @@
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
 
+  private static class TestAppender extends AppenderSkeleton {
+
+    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    protected void append(LoggingEvent arg0) {
+      logEvents.add(arg0);
+    }
+
+    private List<LoggingEvent> getLogEvents() {
+      return logEvents;
+    }
+  }
+
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
@@ -723,7 +751,9 @@
 
   @Test
   public void testMemoryIncrementConfiguredViaMultipleProperties() {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender testAppender = new TestAppender();
+    Logger logger = LogManager.getRootLogger();
+    logger.addAppender(testAppender);
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-mb", "7");
@@ -733,19 +763,23 @@
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13L, increment.getMemorySize());
-      assertTrue("Warning message is not logged when specifying memory "
-          + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
-          + "yarn.resource-types.memory-mb.increment-allocation=13 is "
-          + "overriding the yarn.scheduler.increment-allocation-mb=7 "
-          + "property"));
+      assertTrue("Warning message is not logged when specifying memory " +
+          "increment via multiple properties",
+          testAppender.getLogEvents().stream().anyMatch(
+            e -> e.getLevel() == Level.WARN && ("Configuration " +
+              "yarn.resource-types.memory-mb.increment-allocation=13 is " +
+              "overriding the yarn.scheduler.increment-allocation-mb=7 " +
+              "property").equals(e.getMessage())));
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(testAppender);
     }
   }
 
   @Test
   public void testCpuIncrementConfiguredViaMultipleProperties() {
-    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+    TestAppender testAppender = new TestAppender();
+    Logger logger = LogManager.getRootLogger();
+    logger.addAppender(testAppender);
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-vcores", "7");
@@ -755,13 +789,15 @@
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13, increment.getVirtualCores());
-      assertTrue("Warning message is not logged when specifying CPU vCores "
-          + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
-          + "yarn.resource-types.vcores.increment-allocation=13 is "
-          + "overriding the yarn.scheduler.increment-allocation-vcores=7 "
-          + "property"));
+      assertTrue("Warning message is not logged when specifying CPU vCores " +
+          "increment via multiple properties",
+          testAppender.getLogEvents().stream().anyMatch(
+            e -> e.getLevel() == Level.WARN && ("Configuration " +
+              "yarn.resource-types.vcores.increment-allocation=13 is " +
+              "overriding the yarn.scheduler.increment-allocation-vcores=7 " +
+              "property").equals(e.getMessage())));
     } finally {
-      logCapturer.stopCapturing();
+      logger.removeAppender(testAppender);
     }
   }
 }