HADOOP-18207. Introduce hadoop-logging module (#5503)

Reviewed-by: Duo Zhang <zhangduo@apache.org>
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 4deda43..9a060f7 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -46,16 +46,6 @@
       <artifactId>slf4j-api</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>runtime</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>runtime</scope>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 433a615..4cdd600 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -82,14 +82,14 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <scope>runtime</scope>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>runtime</scope>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
index f9c922c..e18982d 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
@@ -15,8 +15,7 @@
 
 import java.util.Random;
 
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -30,9 +29,8 @@
   private final int timeout = 500;
   private final long rolloverFrequency = timeout / 2;
 
-  {
-    LogManager.getLogger(
-        RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
+  static {
+    HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
   }
 
   @Test
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index 628342e..d81d1eb 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -19,8 +19,7 @@
 import javax.servlet.ServletContext;
 
 import org.apache.curator.test.TestingServer;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -39,9 +38,8 @@
   private final int timeout = 100;
   private final long rolloverFrequency = timeout / 2;
 
-  {
-    LogManager.getLogger(
-        RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
+  static {
+    HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
   }
 
   @Before
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 6c6d3ec..58006c0 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -411,6 +411,16 @@
       <artifactId>lz4-java</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index b4eec1f..0866651 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -299,7 +299,7 @@
 yarn.ewma.cleanupInterval=300
 yarn.ewma.messageAgeLimitSeconds=86400
 yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender
 log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 3287959..cf090ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
@@ -50,8 +51,6 @@
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 /**
  * Change log level in runtime.
@@ -349,7 +348,7 @@
         }
 
         if (GenericsUtil.isLog4jLogger(logName)) {
-          process(Logger.getLogger(logName), level, out);
+          process(logName, level, out);
         } else {
           out.println("Sorry, setting log level is only supported for log4j loggers.<br />");
         }
@@ -368,19 +367,17 @@
         + "<input type='submit' value='Set Log Level' />"
         + "</form>";
 
-    private static void process(Logger log, String level,
-        PrintWriter out) throws IOException {
+    private static void process(String log, String level, PrintWriter out) {
       if (level != null) {
-        if (!level.equalsIgnoreCase(Level.toLevel(level)
-            .toString())) {
-          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
-        } else {
-          log.setLevel(Level.toLevel(level));
+        try {
+          HadoopLoggerUtils.setLogLevel(log, level);
           out.println(MARKER + "Setting Level to " + level + " ...<br />");
+        } catch (IllegalArgumentException e) {
+          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
         }
       }
-      out.println(MARKER
-          + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
+      out.println(MARKER + "Effective Level: <b>" + HadoopLoggerUtils.getEffectiveLevel(log)
+          + "</b><br />");
     }
 
   }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 3debd36..3c13fea 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -40,8 +40,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.log4j.LogManager;
 
 import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 
@@ -761,7 +761,7 @@
         public void run() {
           log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
             "Shutting down " + classname + " at " + hostname}));
-          LogManager.shutdown();
+          HadoopLoggerUtils.shutdownLogManager();
         }
       }, SHUTDOWN_HOOK_PRIORITY);
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index b3487ef..913826f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.alias.CredentialProvider;
 import org.apache.hadoop.security.alias.CredentialProviderFactory;
@@ -76,10 +77,8 @@
 
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
 import org.mockito.Mockito;
+import org.slf4j.LoggerFactory;
 
 public class TestConfiguration {
 
@@ -220,9 +219,7 @@
     InputStream in2 = new ByteArrayInputStream(bytes2);
 
     // Attach our own log appender so we can verify output
-    TestAppender appender = new TestAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     try {
       // Add the 2 different resources - this should generate a warning
@@ -230,17 +227,13 @@
       conf.addResource(in2);
       assertEquals("should see the first value", "A", conf.get("prop"));
 
-      List<LoggingEvent> events = appender.getLog();
-      assertEquals("overriding a final parameter should cause logging", 1,
-          events.size());
-      LoggingEvent loggingEvent = events.get(0);
-      String renderedMessage = loggingEvent.getRenderedMessage();
-      assertTrue("did not see expected string inside message "+ renderedMessage,
-          renderedMessage.contains("an attempt to override final parameter: "
-              + "prop;  Ignoring."));
+      String renderedMessage = logCapturer.getOutput();
+      assertTrue("did not see expected string inside message " + renderedMessage,
+          renderedMessage.contains(
+              "an attempt to override final parameter: " + "prop;  Ignoring."));
     } finally {
       // Make sure the appender is removed
-      logger.removeAppender(appender);
+      logCapturer.stopCapturing();
     }
   }
 
@@ -258,9 +251,7 @@
     InputStream in2 = new ByteArrayInputStream(bytes);
 
     // Attach our own log appender so we can verify output
-    TestAppender appender = new TestAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     try {
       // Add the resource twice from a stream - should not generate warnings
@@ -268,20 +259,15 @@
       conf.addResource(in2);
       assertEquals("A", conf.get("prop"));
 
-      List<LoggingEvent> events = appender.getLog();
-      for (LoggingEvent loggingEvent : events) {
-        System.out.println("Event = " + loggingEvent.getRenderedMessage());
-      }
+      String appenderOutput = logCapturer.getOutput();
       assertTrue("adding same resource twice should not cause logging",
-          events.isEmpty());
+          appenderOutput.isEmpty());
     } finally {
       // Make sure the appender is removed
-      logger.removeAppender(appender);
+      logCapturer.stopCapturing();
     }
   }
 
-
-
   @Test
   public void testFinalWarningsMultiple() throws Exception {
     // Make a configuration file with a repeated final property
@@ -295,24 +281,19 @@
     InputStream in1 = new ByteArrayInputStream(bytes);
 
     // Attach our own log appender so we can verify output
-    TestAppender appender = new TestAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     try {
       // Add the resource - this should not produce a warning
       conf.addResource(in1);
       assertEquals("should see the value", "A", conf.get("prop"));
 
-      List<LoggingEvent> events = appender.getLog();
-      for (LoggingEvent loggingEvent : events) {
-        System.out.println("Event = " + loggingEvent.getRenderedMessage());
-      }
+      String appenderOutput = logCapturer.getOutput();
       assertTrue("adding same resource twice should not cause logging",
-          events.isEmpty());
+          appenderOutput.isEmpty());
     } finally {
       // Make sure the appender is removed
-      logger.removeAppender(appender);
+      logCapturer.stopCapturing();
     }
   }
 
@@ -329,48 +310,20 @@
     InputStream in1 = new ByteArrayInputStream(bytes);
 
     // Attach our own log appender so we can verify output
-    TestAppender appender = new TestAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     try {
       // Add the resource - this should produce a warning
       conf.addResource(in1);
       assertEquals("should see the value", "A", conf.get("prop"));
 
-      List<LoggingEvent> events = appender.getLog();
-      assertEquals("overriding a final parameter should cause logging", 1,
-          events.size());
-      LoggingEvent loggingEvent = events.get(0);
-      String renderedMessage = loggingEvent.getRenderedMessage();
-      assertTrue("did not see expected string inside message "+ renderedMessage,
-          renderedMessage.contains("an attempt to override final parameter: "
-              + "prop;  Ignoring."));
+      String renderedMessage = logCapturer.getOutput();
+      assertTrue("did not see expected string inside message " + renderedMessage,
+          renderedMessage.contains(
+              "an attempt to override final parameter: " + "prop;  Ignoring."));
     } finally {
       // Make sure the appender is removed
-      logger.removeAppender(appender);
-    }
-  }
-
-  /**
-   * A simple appender for white box testing.
-   */
-  private static class TestAppender extends AppenderSkeleton {
-    private final List<LoggingEvent> log = new ArrayList<>();
-
-    @Override public boolean requiresLayout() {
-      return false;
-    }
-
-    @Override protected void append(final LoggingEvent loggingEvent) {
-      log.add(loggingEvent);
-    }
-
-    @Override public void close() {
-    }
-
-    public List<LoggingEvent> getLog() {
-      return new ArrayList<>(log);
+      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
index c016ff0..9e4405f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
@@ -36,8 +36,9 @@
 import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
-import org.apache.log4j.Logger;
 import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -47,9 +48,6 @@
 
 public class CompressDecompressTester<T extends Compressor, E extends Decompressor> {
 
-  private static final Logger logger = Logger
-      .getLogger(CompressDecompressTester.class);
-
   private final byte[] originalRawData;
 
   private ImmutableList<TesterPair<T, E>> pairs = ImmutableList.of();
@@ -488,12 +486,12 @@
 
     return false;      
   }
-  
+
   abstract static class TesterCompressionStrategy {
 
-    protected final Logger logger = Logger.getLogger(getClass());
+    protected final Logger logger = LoggerFactory.getLogger(getClass());
 
-    abstract void assertCompression(String name, Compressor compressor,
-        Decompressor decompressor, byte[] originalRawData) throws Exception;
+    abstract void assertCompression(String name, Compressor compressor, Decompressor decompressor,
+        byte[] originalRawData) throws Exception;
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
index 636c03a..99a1ff8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.log.LogLevel.CLI;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
@@ -40,12 +41,11 @@
 import org.junit.Assert;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.net.ssl.SSLException;
@@ -67,7 +67,7 @@
   private final String logName = TestLogLevel.class.getName();
   private String clientPrincipal;
   private String serverPrincipal;
-  private final Logger log = Logger.getLogger(logName);
+  private final Logger log = LoggerFactory.getLogger(logName);
   private final static String PRINCIPAL = "loglevel.principal";
   private final static String KEYTAB  = "loglevel.keytab";
   private static final String PREFIX = "hadoop.http.authentication.";
@@ -76,7 +76,7 @@
   public static void setUp() throws Exception {
     org.slf4j.Logger logger =
         LoggerFactory.getLogger(KerberosAuthenticator.class);
-    GenericTestUtils.setLogLevel(logger, Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel(logger.getName(), "DEBUG");
     FileUtil.fullyDelete(BASEDIR);
     if (!BASEDIR.mkdirs()) {
       throw new Exception("unable to create the base directory for testing");
@@ -230,7 +230,7 @@
       final String connectProtocol, final boolean isSpnego)
       throws Exception {
     testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego,
-        Level.DEBUG.toString());
+        "DEBUG");
   }
 
   /**
@@ -250,9 +250,8 @@
     if (!LogLevel.isValidProtocol(connectProtocol)) {
       throw new Exception("Invalid client protocol " + connectProtocol);
     }
-    Level oldLevel = log.getEffectiveLevel();
-    Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.",
-        Level.ERROR, oldLevel);
+    String oldLevel = HadoopLoggerUtils.getEffectiveLevel(log.getName());
+    Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.", "ERROR", oldLevel);
 
     // configs needed for SPNEGO at server side
     if (isSpnego) {
@@ -288,7 +287,7 @@
     });
     server.stop();
     // restore log level
-    GenericTestUtils.setLogLevel(log, oldLevel);
+    HadoopLoggerUtils.setLogLevel(log.getName(), oldLevel);
   }
 
   /**
@@ -322,7 +321,7 @@
     cli.run(setLevelArgs);
 
     assertEquals("new level not equal to expected: ", newLevel.toUpperCase(),
-        log.getEffectiveLevel().toString());
+        HadoopLoggerUtils.getEffectiveLevel(log.getName()));
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
index 0dabe46..8cfa14c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
@@ -42,8 +42,9 @@
 import static org.apache.hadoop.metrics2.lib.Interns.info;
 import static org.junit.Assert.assertEquals;
 
-import org.apache.log4j.Logger;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.management.MBeanAttributeInfo;
 import javax.management.MBeanInfo;
@@ -241,7 +242,7 @@
     private MetricsSourceAdapter sa = null;
     private ScheduledFuture<?> future = null;
     private AtomicBoolean hasError = null;
-    private static final Logger LOG = Logger.getLogger(SourceUpdater.class);
+    private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class);
 
     public SourceUpdater(MetricsSourceAdapter sourceAdapter,
         AtomicBoolean err) {
@@ -263,7 +264,7 @@
       } catch (Exception e) {
         // catch all errors
         hasError.set(true);
-        LOG.error(e.getStackTrace());
+        LOG.error("Something went wrong.", e);
       } finally {
         if (hasError.get()) {
           LOG.error("Hit error, stopping now");
@@ -284,7 +285,7 @@
     private int cnt = 0;
     private ScheduledFuture<?> future = null;
     private AtomicBoolean hasError = null;
-    private static final Logger LOG = Logger.getLogger(SourceReader.class);
+    private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class);
 
     public SourceReader(
         TestMetricsSource source, MetricsSourceAdapter sourceAdapter,
@@ -318,7 +319,7 @@
       } catch (Exception e) {
         // catch other errors
         hasError.set(true);
-        LOG.error(e.getStackTrace());
+        LOG.error("Something went wrong.", e);
       } finally {
         if (hasError.get()) {
           future.cancel(false);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
index 8c1339d..b139971 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
@@ -22,7 +22,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
@@ -41,8 +41,8 @@
   private static final Logger TESTLOG =
       LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class);
 
-  private final GenericTestUtils.LogCapturer shellMappingLog =
-      GenericTestUtils.LogCapturer.captureLogs(
+  private final LogCapturer shellMappingLog =
+      LogCapturer.captureLogs(
           ShellBasedUnixGroupsMapping.LOG);
 
   private class TestGroupUserNotExist
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
index a0ce721..6a6fff8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
@@ -19,6 +19,8 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
+
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -42,7 +44,7 @@
     private static final String BASEDIR = GenericTestUtils.getTempPath(
             TestReloadingX509TrustManager.class.getSimpleName());
 
-    private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs(
+    private final LogCapturer reloaderLog = LogCapturer.captureLogs(
             FileMonitoringTimerTask.LOG);
 
     @BeforeClass
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index 6358959..8d2a4c7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -19,7 +19,7 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 
 import java.util.function.Supplier;
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
index b7b86b7..839c51c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.service;
 
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
@@ -29,7 +29,7 @@
 
 import java.io.PrintWriter;
 
-import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
+import static org.apache.hadoop.logging.LogCapturer.captureLogs;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.times;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index e54971e..825fc70 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -25,7 +25,6 @@
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.PrintStream;
-import java.io.StringWriter;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
@@ -38,7 +37,6 @@
 import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
-import java.util.Enumeration;
 import java.util.TreeSet;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
@@ -53,17 +51,11 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
 import org.apache.hadoop.util.DurationInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Appender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.WriterAppender;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.mockito.invocation.InvocationOnMock;
@@ -115,51 +107,17 @@
   public static final String ERROR_INVALID_ARGUMENT =
       "Total wait time should be greater than check interval time";
 
-  @Deprecated
-  public static Logger toLog4j(org.slf4j.Logger logger) {
-    return LogManager.getLogger(logger.getName());
-  }
-
-  /**
-   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
-   */
-  @Deprecated
-  public static void disableLog(Logger logger) {
-    logger.setLevel(Level.OFF);
-  }
-
   public static void disableLog(org.slf4j.Logger logger) {
-    disableLog(toLog4j(logger));
-  }
-
-  public static void setLogLevel(Logger logger, Level level) {
-    logger.setLevel(level);
-  }
-
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
-  public static void setLogLevel(org.slf4j.Logger logger, Level level) {
-    setLogLevel(toLog4j(logger), level);
+    HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF");
   }
 
   public static void setLogLevel(org.slf4j.Logger logger,
                                  org.slf4j.event.Level level) {
-    setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
+    HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString());
   }
 
   public static void setRootLogLevel(org.slf4j.event.Level level) {
-    setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
-  }
-
-  public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) {
-    for (Enumeration<?> loggers = LogManager.getCurrentLoggers();
-        loggers.hasMoreElements();) {
-      Logger logger = (Logger) loggers.nextElement();
-      logger.setLevel(Level.toLevel(level.toString()));
-    }
+    HadoopLoggerUtils.setLogLevel("root", level.toString());
   }
 
   public static org.slf4j.event.Level toLevel(String level) {
@@ -471,47 +429,6 @@
     }
   }
 
-  public static class LogCapturer {
-    private StringWriter sw = new StringWriter();
-    private WriterAppender appender;
-    private Logger logger;
-
-    public static LogCapturer captureLogs(org.slf4j.Logger logger) {
-      if (logger.getName().equals("root")) {
-        return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
-      }
-      return new LogCapturer(toLog4j(logger));
-    }
-
-    public static LogCapturer captureLogs(Logger logger) {
-      return new LogCapturer(logger);
-    }
-
-    private LogCapturer(Logger logger) {
-      this.logger = logger;
-      Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
-      if (defaultAppender == null) {
-        defaultAppender = Logger.getRootLogger().getAppender("console");
-      }
-      final Layout layout = (defaultAppender == null) ? new PatternLayout() :
-          defaultAppender.getLayout();
-      this.appender = new WriterAppender(layout, sw);
-      logger.addAppender(this.appender);
-    }
-
-    public String getOutput() {
-      return sw.toString();
-    }
-
-    public void stopCapturing() {
-      logger.removeAppender(appender);
-    }
-
-    public void clearOutput() {
-      sw.getBuffer().setLength(0);
-    }
-  }
-
   /**
    * Mockito answer helper that triggers one latch as soon as the
    * method is called, then waits on another before continuing.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index 8489e3d..f6f4a44 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -26,6 +26,8 @@
 import java.util.function.Supplier;
 import org.slf4j.event.Level;
 
+import org.apache.hadoop.logging.LogCapturer;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
index 98e1822..8375864 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
@@ -22,8 +22,8 @@
 
 import org.junit.Assert;
 
-import org.apache.log4j.Logger;
 import org.junit.Test;
+import org.slf4j.Logger;
 
 public class TestClassUtil {
   @Test(timeout=10000)
@@ -35,6 +35,6 @@
     Assert.assertTrue("Containing jar does not exist on file system ",
         jarFile.exists());
     Assert.assertTrue("Incorrect jar file " + containingJar,
-        jarFile.getName().matches("log4j.*[.]jar"));
+        jarFile.getName().matches("slf4j-api.*[.]jar"));
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
index 1d1ce89..ec26af66 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
@@ -28,7 +28,7 @@
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.assertj.core.api.Assertions;
 import org.junit.Before;
 import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
index f43930d..fb6221f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
@@ -28,10 +28,12 @@
 import java.util.Random;
 
 import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.util.hash.Hash;
-import org.apache.log4j.Logger;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
@@ -113,7 +115,7 @@
   }
 
   interface FilterTesterStrategy {
-    final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
+    Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class);
 
     void assertWhat(Filter filter, int numInsertions, int hashId,
         ImmutableSet<Integer> falsePositives);
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index 96588a2..8a04c4e 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -54,6 +54,12 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop.thirdparty</groupId>
       <artifactId>hadoop-shaded-guava</artifactId>
       <scope>compile</scope>
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index f4c7fbe..97d85428 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.apache.http.client.utils.URIBuilder;
 import org.junit.After;
@@ -583,8 +584,8 @@
   @Test
   public void testStartStopHttpPseudo() throws Exception {
     // Make sure bogus errors don't get emitted.
-    GenericTestUtils.LogCapturer logs =
-        GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
+    LogCapturer logs =
+        LogCapturer.captureLogs(LoggerFactory.getLogger(
             "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator"));
     try {
       testStartStop(false, false);
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
index 3d0fd7d..6e12d94 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
@@ -18,23 +18,24 @@
 package org.apache.hadoop.crypto.key.kms.server;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.FilterOutputStream;
-import java.io.InputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Paths;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.lang3.reflect.FieldUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.ThreadUtil;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.PropertyConfigurator;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -67,24 +68,23 @@
   public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS);
 
   @Before
-  public void setUp() throws IOException {
+  public void setUp() throws IOException, URISyntaxException {
     originalOut = System.err;
     memOut = new ByteArrayOutputStream();
     filterOut = new FilterOut(memOut);
     capturedOut = new PrintStream(filterOut);
     System.setErr(capturedOut);
-    InputStream is =
-        ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties");
-    PropertyConfigurator.configure(is);
-    IOUtils.closeStream(is);
+    URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties");
+    File file = Paths.get(url.toURI()).toFile();
+    HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath());
     Configuration conf = new Configuration();
     this.kmsAudit = new KMSAudit(conf);
   }
 
   @After
-  public void cleanUp() {
+  public void cleanUp() throws Exception {
     System.setErr(originalOut);
-    LogManager.resetConfiguration();
+    HadoopLoggerUtils.resetConfiguration();
     kmsAudit.shutdown();
   }
 
diff --git a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 0000000..304d1e4
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,23 @@
+<FindBugsFilter>
+  <!--
+   conversionPattern is only set once, and is used to instantiate the PatternLayout object
+   only once. It is set by the log4j framework if configured as part of the log4j properties,
+   and is accessed only during the first append operation.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.logging.appenders.AsyncRFAAppender"/>
+    <Field name="conversionPattern"/>
+    <Bug pattern="IS2_INCONSISTENT_SYNC"/>
+  </Match>
+
+  <!-- The following fields are used in ErrorsAndWarningsBlock, which is not part of the findbugs analysis -->
+  <Match>
+    <Class name="org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender$Element"/>
+    <Or>
+      <Field name="count"/>
+      <Field name="timestampSeconds"/>
+    </Or>
+    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD"/>
+  </Match>
+
+</FindBugsFilter>
diff --git a/hadoop-common-project/hadoop-logging/pom.xml b/hadoop-common-project/hadoop-logging/pom.xml
new file mode 100644
index 0000000..20af2be
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/pom.xml
@@ -0,0 +1,125 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-project</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.4.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>hadoop-logging</artifactId>
+  <version>3.4.0-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <name>Apache Hadoop Logging</name>
+  <description>Logging Support for Apache Hadoop project</description>
+
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <attach>true</attach>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>prepare-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>prepare-test-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>com.github.spotbugs</groupId>
+        <artifactId>spotbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java
new file mode 100644
index 0000000..b0bd2e3
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.logging;
+
+import java.io.FileInputStream;
+import java.io.Flushable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Enumeration;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PropertyConfigurator;
+
+/**
+ * Hadoop's internal class that accesses log4j APIs directly.
+ * <p/>
+ * This class will depend on log4j directly, so callers should not use this class directly to avoid
+ * introducing log4j dependencies to downstream users. Please call the methods in
+ * {@link HadoopLoggerUtils}, as they will call the methods here through reflection.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+final class HadoopInternalLog4jUtils {
+
+  private HadoopInternalLog4jUtils() {
+  }
+
+  static void setLogLevel(String loggerName, String levelName) {
+    if (loggerName == null) {
+      throw new IllegalArgumentException("logger name cannot be null");
+    }
+    Logger logger = loggerName.equalsIgnoreCase("root") ?
+        LogManager.getRootLogger() :
+        LogManager.getLogger(loggerName);
+    Level level = Level.toLevel(levelName.toUpperCase());
+    if (!level.toString().equalsIgnoreCase(levelName)) {
+      throw new IllegalArgumentException("Unsupported log level " + levelName);
+    }
+    logger.setLevel(level);
+  }
+
+  static void shutdownLogManager() {
+    LogManager.shutdown();
+  }
+
+  static String getEffectiveLevel(String loggerName) {
+    Logger logger = loggerName.equalsIgnoreCase("root") ?
+        LogManager.getRootLogger() :
+        LogManager.getLogger(loggerName);
+    return logger.getEffectiveLevel().toString();
+  }
+
+  static void resetConfiguration() {
+    LogManager.resetConfiguration();
+  }
+
+  static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) throws Exception {
+    Properties customProperties = new Properties();
+    try (FileInputStream fs = new FileInputStream(log4jPath);
+        InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
+      customProperties.load(fs);
+      Properties originalProperties = new Properties();
+      originalProperties.load(is);
+      for (Map.Entry<Object, Object> entry : customProperties.entrySet()) {
+        originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString());
+      }
+      LogManager.resetConfiguration();
+      PropertyConfigurator.configure(originalProperties);
+    }
+  }
+
+  static boolean hasAppenders(String logger) {
+    return Logger.getLogger(logger)
+        .getAllAppenders()
+        .hasMoreElements();
+  }
+
+  @SuppressWarnings("unchecked")
+  static void syncLogs() {
+    // flush standard streams
+    //
+    System.out.flush();
+    System.err.flush();
+
+    // flush flushable appenders
+    //
+    final Logger rootLogger = Logger.getRootLogger();
+    flushAppenders(rootLogger);
+    final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
+        getCurrentLoggers();
+    while (allLoggers.hasMoreElements()) {
+      final Logger l = allLoggers.nextElement();
+      flushAppenders(l);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private static void flushAppenders(Logger l) {
+    final Enumeration<Appender> allAppenders = l.getAllAppenders();
+    while (allAppenders.hasMoreElements()) {
+      final Appender a = allAppenders.nextElement();
+      if (a instanceof Flushable) {
+        try {
+          ((Flushable) a).flush();
+        } catch (IOException ioe) {
+          System.err.println(a + ": Failed to flush!"
+              + stringifyException(ioe));
+        }
+      }
+    }
+  }
+
+  private static String stringifyException(Throwable e) {
+    StringWriter stringWriter = new StringWriter();
+    PrintWriter printWriter = new PrintWriter(stringWriter);
+    e.printStackTrace(printWriter);
+    printWriter.close();
+    return stringWriter.toString();
+  }
+
+}
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java
new file mode 100644
index 0000000..1d0bea1
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.logging;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A bridge class for operating on the logging framework, such as changing the log4j log level, etc.
+ * Will call the methods in {@link HadoopInternalLog4jUtils} to perform operations at the log4j level.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class HadoopLoggerUtils {
+
+  private static final String INTERNAL_UTILS_CLASS =
+      "org.apache.hadoop.logging.HadoopInternalLog4jUtils";
+
+  private HadoopLoggerUtils() {
+  }
+
+  private static Method getMethod(String methodName, Class<?>... args) {
+    try {
+      Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS);
+      return clazz.getDeclaredMethod(methodName, args);
+    } catch (ClassNotFoundException | NoSuchMethodException e) {
+      throw new AssertionError("should not happen", e);
+    }
+  }
+
+  private static void throwUnchecked(Throwable throwable) {
+    if (throwable instanceof RuntimeException) {
+      throw (RuntimeException) throwable;
+    }
+    if (throwable instanceof Error) {
+      throw (Error) throwable;
+    }
+  }
+
+  public static void shutdownLogManager() {
+    Method method = getMethod("shutdownLogManager");
+    try {
+      method.invoke(null);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+  public static void setLogLevel(String loggerName, String levelName) {
+    Method method = getMethod("setLogLevel", String.class, String.class);
+    try {
+      method.invoke(null, loggerName, levelName);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+  public static String getEffectiveLevel(String loggerName) {
+    Method method = getMethod("getEffectiveLevel", String.class);
+    try {
+      return (String) method.invoke(null, loggerName);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+  public static void resetConfiguration() {
+    Method method = getMethod("resetConfiguration");
+    try {
+      method.invoke(null);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+  public static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) {
+    Method method = getMethod("updateLog4jConfiguration", Class.class, String.class);
+    try {
+      method.invoke(null, targetClass, log4jPath);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+  public static boolean hasAppenders(String logger) {
+    Method method = getMethod("hasAppenders", String.class);
+    try {
+      return (Boolean) method.invoke(null, logger);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+  public synchronized static void syncLogs() {
+    Method method = getMethod("syncLogs");
+    try {
+      method.invoke(null);
+    } catch (IllegalAccessException e) {
+      throw new AssertionError("should not happen", e);
+    } catch (InvocationTargetException e) {
+      throwUnchecked(e.getCause());
+      throw new AssertionError("Failed to execute, should not happen", e.getCause());
+    }
+  }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
similarity index 98%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
rename to hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
index 276e5b0..2abfffb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
+++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.logging.appenders;
 
 import java.io.IOException;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
similarity index 93%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
rename to hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
index fffc8a8..cf7a2bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
+++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java
@@ -16,12 +16,10 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.util;
+package org.apache.hadoop.logging.appenders;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -113,16 +111,13 @@
   /**
    * Create an appender to keep track of the errors and warnings logged by the
    * system.
-   * 
-   * @param cleanupIntervalSeconds
-   *          the interval at which old messages are purged to prevent the
-   *          message stores from growing unbounded
-   * @param messageAgeLimitSeconds
-   *          the maximum age of a message in seconds before it is purged from
-   *          the store
-   * @param maxUniqueMessages
-   *          the maximum number of unique messages of each type we keep before
-   *          we start purging
+   *
+   * @param cleanupIntervalSeconds the interval at which old messages are purged to prevent the
+   * message stores from growing unbounded.
+   * @param messageAgeLimitSeconds the maximum age of a message in seconds before it is purged from
+   * the store.
+   * @param maxUniqueMessages the maximum number of unique messages of each type we keep before
+   * we start purging.
    */
   public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds,
       long messageAgeLimitSeconds, int maxUniqueMessages) {
@@ -143,6 +138,20 @@
     this.setThreshold(Level.WARN);
   }
 
+  private static String join(CharSequence separator, String[] strings) {
+    StringBuilder sb = new StringBuilder();
+    boolean first = true;
+    for (String s : strings) {
+      if (first) {
+        first = false;
+      } else {
+        sb.append(separator);
+      }
+      sb.append(s);
+    }
+    return sb.toString();
+  }
+
   /**
    * {@inheritDoc}
    */
@@ -151,7 +160,7 @@
     String message = event.getRenderedMessage();
     String[] throwableStr = event.getThrowableStrRep();
     if (throwableStr != null) {
-      message = message + "\n" + StringUtils.join("\n", throwableStr);
+      message = message + "\n" + join("\n", throwableStr);
       message =
           org.apache.commons.lang3.StringUtils.left(message, MAX_MESSAGE_SIZE);
     }
@@ -232,7 +241,7 @@
    * getErrorMessagesAndCounts since the message store is purged at regular
    * intervals to prevent it from growing without bounds, while the store for
    * the counts is purged less frequently.
-   * 
+   *
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -248,7 +257,7 @@
    * getWarningMessagesAndCounts since the message store is purged at regular
    * intervals to prevent it from growing without bounds, while the store for
    * the counts is purged less frequently.
-   * 
+   *
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -285,7 +294,7 @@
    * differ from the ones provided by getErrorCounts since the message store is
    * purged at regular intervals to prevent it from growing without bounds,
    * while the store for the counts is purged less frequently.
-   * 
+   *
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -304,7 +313,7 @@
    * may differ from the ones provided by getWarningCounts since the message
    * store is purged at regular intervals to prevent it from growing without
    * bounds, while the store for the counts is purged less frequently.
-   * 
+   *
    * @param cutoffs
    *          list of timestamp cutoffs(in seconds) for which the counts are
    *          desired
@@ -322,7 +331,7 @@
       SortedSet<PurgeElement> purgeInformation) {
     if (purgeInformation.size() > maxUniqueMessages) {
       ErrorAndWarningsCleanup cleanup = new ErrorAndWarningsCleanup();
-      long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
+      long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000);
       cutoff = (cutoff / 1000);
       cleanup.cleanupMessages(map, purgeInformation, cutoff, maxUniqueMessages);
     }
@@ -379,7 +388,7 @@
 
     @Override
     public void run() {
-      long cutoff = Time.now() - (messageAgeLimitSeconds * 1000);
+      long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000);
       cutoff = (cutoff / 1000);
       cleanupMessages(errors, errorsPurgeInformation, cutoff, maxUniqueMessages);
       cleanupMessages(warnings, warningsPurgeInformation, cutoff,
diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java
new file mode 100644
index 0000000..45f5d0c
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.logging;
+
+import java.io.StringWriter;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.WriterAppender;
+
+public class LogCapturer {
+  private final StringWriter sw = new StringWriter();
+  private final Appender appender;
+  private final Logger logger;
+
+  public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+    if (logger.getName().equals("root")) {
+      return new LogCapturer(Logger.getRootLogger());
+    }
+    return new LogCapturer(LogManager.getLogger(logger.getName()));
+  }
+
+  private LogCapturer(Logger logger) {
+    this.logger = logger;
+    Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
+    if (defaultAppender == null) {
+      defaultAppender = Logger.getRootLogger().getAppender("console");
+    }
+    final Layout layout =
+        (defaultAppender == null) ? new PatternLayout() : defaultAppender.getLayout();
+    this.appender = new WriterAppender(layout, sw);
+    logger.addAppender(this.appender);
+  }
+
+  public String getOutput() {
+    return sw.toString();
+  }
+
+  public void stopCapturing() {
+    logger.removeAppender(appender);
+  }
+
+  public void clearOutput() {
+    sw.getBuffer().setLength(0);
+  }
+}
diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java
new file mode 100644
index 0000000..4bafb5a
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.logging.test;
+
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.logging.HadoopLoggerUtils;
+
+public class TestSyncLogs {
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestSyncLogs.class);
+
+  @Test
+  public void testSyncLogs() {
+    LOG.info("Testing log sync");
+    HadoopLoggerUtils.syncLogs();
+  }
+
+}
diff --git a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties
new file mode 100644
index 0000000..ff1468c
--- /dev/null
+++ b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties
@@ -0,0 +1,18 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=debug,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml
index c292aeb..d2e9933 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -39,11 +39,6 @@
       <artifactId>kerb-simplekdc</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>compile</scope>
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index f167a07..b0fb888 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -38,6 +38,7 @@
     <module>hadoop-minikdc</module>
     <module>hadoop-kms</module>
     <module>hadoop-registry</module>
+    <module>hadoop-logging</module>
   </modules>
 
   <build>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index b362e00..9a1226e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -87,6 +87,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
       <groupId>org.mock-server</groupId>
       <artifactId>mockserver-netty</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
index 1fe6dca..d0b8653 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Lists;
 import org.junit.Assert;
 import org.junit.Test;
@@ -61,8 +62,8 @@
   public void testSSLInitFailure() throws Exception {
     Configuration conf = new Configuration();
     conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
-    GenericTestUtils.LogCapturer logs =
-        GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs =
+        LogCapturer.captureLogs(
             LoggerFactory.getLogger(URLConnectionFactory.class));
     URLConnectionFactory.newDefaultURLConnectionFactory(conf);
     Assert.assertTrue("Expected log for ssl init failure not found!",
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index a5bf5c1..b9aae62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -182,6 +182,12 @@
       <artifactId>junit-jupiter-params</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
index 0741f1a..9f74337 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX;
 import static org.junit.Assert.assertEquals;
@@ -48,8 +49,8 @@
 
   private static final Logger LOG =
       LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class);
-  private final GenericTestUtils.LogCapturer controllerLog =
-      GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
+  private final LogCapturer controllerLog =
+      LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
 
   private StateStoreDFSCluster cluster;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
index 1f5770b..d4f6827 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
@@ -179,7 +179,7 @@
 
   private void verifyInstantiationError(Configuration conf, int handlerCount,
       int totalDedicatedHandlers) {
-    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
+    LogCapturer logs = LogCapturer
         .captureLogs(LoggerFactory.getLogger(
             StaticRouterRpcFairnessPolicyController.class));
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
index 9ee9692..bb81eaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -55,6 +54,7 @@
 import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -322,11 +322,7 @@
       int httpsRequests, int requestsPerService) {
 
     // Attach our own log appender so we can verify output
-    final LogVerificationAppender appender =
-        new LogVerificationAppender();
-    final org.apache.log4j.Logger logger =
-        org.apache.log4j.Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     GenericTestUtils.setRootLogLevel(Level.DEBUG);
 
     // Setup and start the Router
@@ -347,8 +343,11 @@
         heartbeatService.getNamenodeStatusReport();
       }
     }
-    assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://"));
-    assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://"));
+    assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+        "JMX URL: https://"));
+    assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+        "JMX URL: http://"));
+    logCapturer.stopCapturing();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index d3d3421..3db20a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -135,6 +135,8 @@
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
+
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
@@ -2067,8 +2069,8 @@
 
   @Test
   public void testMkdirsWithCallerContext() throws IOException {
-    GenericTestUtils.LogCapturer auditlog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    LogCapturer auditlog =
+        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2094,8 +2096,8 @@
   @Test
   public void testRealUserPropagationInCallerContext()
       throws IOException, InterruptedException {
-    GenericTestUtils.LogCapturer auditlog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    LogCapturer auditlog =
+        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2139,8 +2141,8 @@
 
   @Test
   public void testAddClientIpPortToCallerContext() throws IOException {
-    GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    LogCapturer auditLog =
+        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientIp and ClientPort are not set on the client.
     // Set client context.
@@ -2174,8 +2176,8 @@
 
   @Test
   public void testAddClientIdAndCallIdToCallerContext() throws IOException {
-    GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    LogCapturer auditLog =
+        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientId and ClientCallId are not set on the client.
     // Set client context.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index 336ea39..caecb69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -72,6 +72,8 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
+
 import org.junit.Test;
 import org.slf4j.event.Level;
 
@@ -276,12 +278,10 @@
   @Test
   public void testPreviousBlockNotNull()
       throws IOException, URISyntaxException {
-    final GenericTestUtils.LogCapturer stateChangeLog =
-        GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog);
+    final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog);
     GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG);
 
-    final GenericTestUtils.LogCapturer nameNodeLog =
-        GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG);
+    final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG);
     GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG);
 
     final FederationRPCMetrics metrics = getRouterContext().
@@ -454,8 +454,8 @@
 
   @Test
   public void testCallerContextWithMultiDestinations() throws IOException {
-    GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+    LogCapturer auditLog =
+        LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // set client context
     CallerContext.setCurrent(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 8632c56..5c2df9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -310,14 +310,4 @@
       <Method name="reconcile" />
       <Bug pattern="SWL_SLEEP_WITH_LOCK_HELD" />
     </Match>
-    <!--
-     conversionPattern is only set once and used to initiate PatternLayout object
-     only once. It is set by log4j framework if set as part of log4j properties and accessed
-     only during first append operation.
-    -->
-    <Match>
-      <Class name="org.apache.hadoop.hdfs.util.AsyncRFAAppender"/>
-      <Field name="conversionPattern"/>
-      <Bug pattern="IS2_INCONSISTENT_SYNC"/>
-    </Match>
 </FindBugsFilter>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 5f15649..a8922cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -165,6 +165,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
index 21c01ce..a361a28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
@@ -31,6 +31,8 @@
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 /**
@@ -111,11 +113,8 @@
         .substring(0, maxLogLineLength) + "...");
   }
 
-  // TODO : hadoop-logging module to hide log4j implementation details, this method
-  //  can directly call utility from hadoop-logging.
   private static boolean hasAppenders(Logger logger) {
-    return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
-        .hasMoreElements();
+    return HadoopLoggerUtils.hasAppenders(logger.getName());
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
index ab30110..4e8daf3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
@@ -32,11 +32,11 @@
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
 import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -110,13 +110,13 @@
   }
 
   static void initLogLevels() {
-    Util.setLogLevel(FSImage.class, Level.TRACE);
-    Util.setLogLevel(FileJournalManager.class, Level.TRACE);
+    Util.setLogLevel(FSImage.class, "TRACE");
+    Util.setLogLevel(FileJournalManager.class, "TRACE");
 
-    Util.setLogLevel(GSet.class, Level.OFF);
-    Util.setLogLevel(BlockManager.class, Level.OFF);
-    Util.setLogLevel(DatanodeManager.class, Level.OFF);
-    Util.setLogLevel(TopMetrics.class, Level.OFF);
+    Util.setLogLevel(GSet.class, "OFF");
+    Util.setLogLevel(BlockManager.class, "OFF");
+    Util.setLogLevel(DatanodeManager.class, "OFF");
+    Util.setLogLevel(TopMetrics.class, "OFF");
   }
 
   static class Util {
@@ -127,11 +127,10 @@
           + ", max=" + StringUtils.byteDesc(runtime.maxMemory());
     }
 
-    static void setLogLevel(Class<?> clazz, Level level) {
-      final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
-      logger.setLevel(level);
+    static void setLogLevel(Class<?> clazz, String level) {
+      HadoopLoggerUtils.setLogLevel(clazz.getName(), level);
       LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
-          logger.getEffectiveLevel());
+          HadoopLoggerUtils.getEffectiveLevel(clazz.getName()));
     }
 
     static String toCommaSeparatedNumber(long n) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
deleted file mode 100644
index 10ef47b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.spi.LoggingEvent;
-import org.apache.log4j.spi.ThrowableInformation;
-
-/**
- * Used to verify that certain exceptions or messages are present in log output.
- */
-public class LogVerificationAppender extends AppenderSkeleton {
-  private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
-
-  @Override
-  public boolean requiresLayout() {
-    return false;
-  }
-
-  @Override
-  protected void append(final LoggingEvent loggingEvent) {
-    log.add(loggingEvent);
-  }
-
-  @Override
-  public void close() {
-  }
-
-  public List<LoggingEvent> getLog() {
-    return new ArrayList<LoggingEvent>(log);
-  }
-  
-  public int countExceptionsWithMessage(final String text) {
-    int count = 0;
-    for (LoggingEvent e: getLog()) {
-      ThrowableInformation t = e.getThrowableInformation();
-      if (t != null) {
-        String m = t.getThrowable().getMessage();
-        if (m.contains(text)) {
-          count++;
-        }
-      }
-    }
-    return count;
-  }
-
-  public int countLinesWithMessage(final String text) {
-    int count = 0;
-    for (LoggingEvent e: getLog()) {
-      String msg = e.getRenderedMessage();
-      if (msg != null && msg.contains(text)) {
-        count++;
-      }
-    }
-    return count;
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index b16f023..75ad5bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -33,7 +33,8 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
+
 import org.junit.Test;
 
 public class TestDFSRename {
@@ -189,8 +190,8 @@
       final DistributedFileSystem dfs = cluster.getFileSystem();
       Path path = new Path("/test");
       dfs.mkdirs(path);
-      GenericTestUtils.LogCapturer auditLog =
-          GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+      LogCapturer auditLog =
+          LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
       dfs.rename(path, new Path("/dir1"),
           new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
       String auditOut = auditLog.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 5469ebb..80424a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -45,9 +45,9 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Logger;
 import org.junit.Test;
 
 import static org.junit.Assert.*;
@@ -317,9 +317,7 @@
         "imageMD5Digest", "22222222222222222222222222222222");
     
     // Attach our own log appender so we can verify output
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     // Upgrade should now fail
     try {
@@ -331,9 +329,10 @@
       if (!msg.contains("Failed to load FSImage file")) {
         throw ioe;
       }
-      int md5failures = appender.countExceptionsWithMessage(
+      int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
           " is corrupt with MD5 checksum of ");
       assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
+      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index c57ef94..c792386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -26,7 +26,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -48,7 +48,7 @@
 
   @Test(timeout = 60000)
   public void testDfsClient() throws IOException, InterruptedException {
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory
+    LogCapturer logs = LogCapturer.captureLogs(LoggerFactory
         .getLogger(DataStreamer.class));
     byte[] toWrite = new byte[PACKET_SIZE];
     new Random(1).nextBytes(toWrite);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index f9336fc..4299c11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -51,7 +51,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.junit.After;
 import org.junit.Before;
@@ -168,9 +168,9 @@
 
     FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
 
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs = LogCapturer.captureLogs(
         LoggerFactory.getLogger(SaslDataTransferServer.class));
-    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs1 = LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -239,7 +239,7 @@
     Mockito.doReturn(false).when(spyClient).shouldEncryptData();
     DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
 
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs = LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataNode.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -457,9 +457,9 @@
 
     fs = getFileSystem(conf);
 
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs = LogCapturer.captureLogs(
         LoggerFactory.getLogger(SaslDataTransferServer.class));
-    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs1 = LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       writeTestDataToFile(fs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index 3dd0b7e..c656128 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -54,7 +54,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
@@ -138,7 +138,7 @@
     HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
     clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
 
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs = LogCapturer.captureLogs(
         LoggerFactory.getLogger(DataNode.class));
     try {
       doTest(clientConf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
index 82b8b58..84b7c8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index d69051c..5d2a927 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -56,7 +56,7 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index d32cde8..7e926a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.Whitebox;
 
 import org.assertj.core.api.Assertions;
@@ -235,8 +236,8 @@
   public void testCheckSafeMode9() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000);
-    GenericTestUtils.LogCapturer logs =
-        GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+    LogCapturer logs =
+        LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
     BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
         fsn, true, conf);
     String content = logs.getOutput();
@@ -247,8 +248,8 @@
   public void testCheckSafeMode10(){
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1);
-    GenericTestUtils.LogCapturer logs =
-            GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+    LogCapturer logs =
+            LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
     BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
             fsn, true, conf);
     String content = logs.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index ea7347f..87c8383 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -58,7 +58,7 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.slf4j.LoggerFactory;
@@ -575,7 +575,7 @@
         new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
-    LogCapturer logs = GenericTestUtils.LogCapturer
+    LogCapturer logs = LogCapturer
         .captureLogs(LoggerFactory.getLogger("BlockStateChange"));
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 20163cc..c4b5f7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -21,7 +21,6 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
@@ -41,6 +40,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.ContentSummary;
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -67,16 +66,15 @@
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class TestReplicationPolicy extends BaseReplicationPolicyTest {
@@ -507,26 +505,26 @@
           2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
           (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
-    
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
-    
+
+    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+
     // try to choose NUM_OF_DATANODES which is more than actually available
     // nodes.
     DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
     assertEquals(targets.length, dataNodes.length - 2);
 
-    final List<LoggingEvent> log = appender.getLog();
-    assertNotNull(log);
-    assertFalse(log.size() == 0);
-    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
-    
-    assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
-    // Suppose to place replicas on each node but two data nodes are not
-    // available for placing replica, so here we expect a short of 2
-    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
-
+    boolean isFound = false;
+    for (String logLine : logCapturer.getOutput().split("\n")) {
+      // Suppose to place replicas on each node but two data nodes are not
+      // available for placing replica, so here we expect a short of 2
+      if(logLine.contains("WARN") && logLine.contains("in need of 2")) {
+        isFound = true;
+        break;
+      }
+    }
+    assertTrue("Could not find the block placement log specific to 2 datanodes not being "
+            + "available for placing replicas", isFound);
+    logCapturer.stopCapturing();
     resetHeartbeatForStorages();
   }
 
@@ -1710,17 +1708,14 @@
 
   @Test
   public void testChosenFailureForStorageType() {
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
-
+    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
         dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
         BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
             HdfsConstants.StoragePolicy.COLD.value()), null);
     assertEquals(0, targets.length);
     assertNotEquals(0,
-        appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
+        StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE"));
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 73201ba..13efcf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -27,7 +27,6 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Collections;
-import java.util.List;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -39,19 +38,15 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AsyncAppender;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
-import java.util.function.Supplier;
-
 /**
  * Test periodic logging of DataNode metrics.
  */
@@ -128,13 +123,13 @@
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testMetricsLoggerIsAsync() throws IOException {
     startDNForTest(true);
     assertNotNull(dn);
-    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
-    @SuppressWarnings("unchecked")
-    List<Appender> appenders = Collections.list(logger.getAllAppenders());
-    assertTrue(appenders.get(0) instanceof AsyncAppender);
+    assertTrue(Collections.list(
+            org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders())
+        .get(0) instanceof org.apache.log4j.AsyncAppender);
   }
 
   /**
@@ -149,27 +144,15 @@
         metricsProvider);
     startDNForTest(true);
     assertNotNull(dn);
-    final PatternMatchingAppender appender =
-        (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME)
-            .getAppender("PATTERNMATCHERAPPENDER");
-
+    LogCapturer logCapturer =
+        LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME));
     // Ensure that the supplied pattern was matched.
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        return appender.isMatched();
-      }
-    }, 1000, 60000);
-
+    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
+        1000, 60000);
+    logCapturer.stopCapturing();
     dn.shutdown();
   }
 
-  private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
-    @SuppressWarnings("unchecked")
-    List<Appender> appenders = Collections.list(logger.getAllAppenders());
-    ((AsyncAppender) appenders.get(0)).addAppender(appender);
-  }
-
   public interface TestFakeMetricMXBean {
     int getFakeMetric();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 74c70ce..82d7a81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -27,7 +27,6 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -77,10 +76,9 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.SimpleLayout;
-import org.apache.log4j.WriterAppender;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -414,14 +412,9 @@
   @Test(timeout=600000)
   public void testScanDirectoryStructureWarn() throws Exception {
 
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     //add a logger stream to check what has printed to log
-    ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
-    org.apache.log4j.Logger rootLogger =
-        org.apache.log4j.Logger.getRootLogger();
     GenericTestUtils.setRootLogLevel(Level.INFO);
-    WriterAppender writerAppender =
-        new WriterAppender(new SimpleLayout(), loggerStream);
-    rootLogger.addAppender(writerAppender);
 
     Configuration conf = getConfiguration();
     cluster = new MiniDFSCluster
@@ -452,7 +445,7 @@
       scan(1, 1, 0, 1, 0, 0, 0);
 
       //ensure the warn log not appear and missing block log do appear
-      String logContent = new String(loggerStream.toByteArray());
+      String logContent = logCapturer.getOutput();
       String missingBlockWarn = "Deleted a metadata file" +
           " for the deleted block";
       String dirStructureWarnLog = " found in invalid directory." +
@@ -464,6 +457,7 @@
       LOG.info("check pass");
 
     } finally {
+      logCapturer.stopCapturing();
       if (scanner != null) {
         scanner.shutdown();
         scanner = null;
@@ -526,7 +520,7 @@
       client = cluster.getFileSystem().getClient();
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
       // log trace
-      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.
+      LogCapturer logCapturer = LogCapturer.
           captureLogs(NameNode.stateChangeLog);
       // Add files with 5 blocks
       createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
index 8b1a6c0..c7fc71f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import net.jcip.annotations.NotThreadSafe;
+
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
@@ -51,7 +53,6 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -79,10 +80,10 @@
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
-import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -393,9 +394,7 @@
     }
 
     // nth file should hit a capacity exception
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));
 
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -403,11 +402,12 @@
       public Boolean get() {
         // check the log reported by FsDataSetCache
         // in the case that cache capacity is exceeded.
-        int lines = appender.countLinesWithMessage(
+        int lines = StringUtils.countMatches(logCapturer.getOutput(),
             "could not reserve more bytes in the cache: ");
         return lines > 0;
       }
     }, 500, 30000);
+    logCapturer.stopCapturing();
     // Also check the metrics for the failure
     assertTrue("Expected more than 0 failed cache attempts",
         fsd.getNumBlocksFailedToCache() > 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 073bb53..8f3ef44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer;
 
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Preconditions;
 import java.util.function.Supplier;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -321,7 +322,7 @@
         0);
     DFSTestUtil.waitReplication(fs, filePath, (short) 1);
 
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+    LogCapturer logCapturer = LogCapturer
         .captureLogs(DiskBalancer.LOG);
 
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
deleted file mode 100644
index f099dfa..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.util.regex.Pattern;
-
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.spi.LoggingEvent;
-
-/**
- * An appender that matches logged messages against the given
- * regular expression.
- */
-public class PatternMatchingAppender extends AppenderSkeleton {
-  private final Pattern pattern;
-  private volatile boolean matched;
-
-  public PatternMatchingAppender() {
-    this.pattern = Pattern.compile("^.*FakeMetric.*$");
-    this.matched = false;
-  }
-
-  public boolean isMatched() {
-    return matched;
-  }
-
-  @Override
-  protected void append(LoggingEvent event) {
-    if (pattern.matcher(event.getMessage().toString()).matches()) {
-      matched = true;
-    }
-  }
-
-  @Override
-  public void close() {
-  }
-
-  @Override
-  public boolean requiresLayout() {
-    return false;
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index c00649a..617f38a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.security.authorize.ProxyServers;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Lists;
 
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index d34d6ca..fec16c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 0f73669..953d1ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -24,7 +24,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.List;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
@@ -39,12 +38,9 @@
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AsyncAppender;
-import org.apache.log4j.Logger;
 
 import org.junit.After;
 import org.junit.AfterClass;
@@ -107,6 +103,7 @@
   UserGroupInformation userGroupInfo;
 
   @Before
+  @SuppressWarnings("unchecked")
   public void setupCluster() throws Exception {
     // must configure prior to instantiating the namesystem because it
     // will reconfigure the logger if async is enabled
@@ -122,11 +119,9 @@
     util.createFiles(fs, fileName);
 
     // make sure the appender is what it's supposed to be
-    Logger logger = org.apache.log4j.Logger.getLogger(
-        "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");
-    @SuppressWarnings("unchecked")
-    List<Appender> appenders = Collections.list(logger.getAllAppenders());
-    assertTrue(appenders.get(0) instanceof AsyncAppender);
+    assertTrue(Collections.list(org.apache.log4j.Logger.getLogger(
+            "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders())
+        .get(0) instanceof org.apache.log4j.AsyncAppender);
     
     fnames = util.getFileNames(fileName);
     util.waitReplication(fs, fileName, (short)3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index d675dcd..ccc6be3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -82,7 +82,7 @@
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
@@ -863,7 +863,7 @@
         savedSd = sd;
       }
       
-      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+      LogCapturer logs = LogCapturer.captureLogs(
           LoggerFactory.getLogger(Storage.class));
       try {
         // try to lock the storage that's already locked
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 771caef..73aee34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -49,7 +49,7 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 17803a0..c68ad18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -83,6 +83,7 @@
 import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
 import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
@@ -90,9 +91,6 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -1717,36 +1715,13 @@
     }
   }
 
-  class TestAppender extends AppenderSkeleton {
-    private final List<LoggingEvent> log = new ArrayList<>();
-
-    @Override
-    public boolean requiresLayout() {
-      return false;
-    }
-
-    @Override
-    protected void append(final LoggingEvent loggingEvent) {
-      log.add(loggingEvent);
-    }
-
-    @Override
-    public void close() {
-    }
-
-    public List<LoggingEvent> getLog() {
-      return new ArrayList<>(log);
-    }
-  }
-
   /**
    *
    * @throws Exception
    */
   @Test
   public void testReadActivelyUpdatedLog() throws Exception {
-    final TestAppender appender = new TestAppender();
-    LogManager.getRootLogger().addAppender(appender);
+    final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     // Set single handler thread, so all transactions hit same thread-local ops.
@@ -1794,21 +1769,16 @@
       rwf.close();
 
       events.poll();
-      String pattern = "Caught exception after reading (.*) ops";
-      Pattern r = Pattern.compile(pattern);
-      final List<LoggingEvent> log = appender.getLog();
-      for (LoggingEvent event : log) {
-        Matcher m = r.matcher(event.getRenderedMessage());
-        if (m.find()) {
+      for (String logLine : logCapturer.getOutput().split("\n")) {
+        if (logLine != null && logLine.contains("Caught exception after reading")) {
           fail("Should not try to read past latest syned edit log op");
         }
       }
-
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
-      LogManager.getRootLogger().removeAppender(appender);
+      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index 3b15c2d..fb484cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -26,6 +26,8 @@
 
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
+
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -118,8 +120,8 @@
     op3.setTransactionId(3);
     buffer.writeOp(op3, fakeLogVersion);
 
-    GenericTestUtils.LogCapturer logs =
-        GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
+    LogCapturer logs =
+        LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
     try {
       buffer.close();
       fail();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 89193ca..860e6b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -64,7 +64,7 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.FakeTimer;
 import org.slf4j.event.Level;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index f0ae181..afb0491 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.FakeTimer;
 import org.apache.hadoop.util.Time;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
index 9c77f9d..08c9240 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
@@ -29,6 +29,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
+
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,7 +60,7 @@
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private UserGroupInformation userGroupInfo;
-  private GenericTestUtils.LogCapturer logs;
+  private LogCapturer logs;
 
   @Before
   public void setUp() throws Exception {
@@ -76,7 +78,7 @@
     userGroupInfo = UserGroupInformation.createUserForTesting("bob",
         new String[] {"hadoop"});
 
-    logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG);
+    logs = LogCapturer.captureLogs(FSNamesystem.LOG);
     GenericTestUtils
         .setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()),
         org.slf4j.event.Level.INFO);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index a312b03..96650a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -115,7 +115,7 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.AfterClass;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index 464fdfc..651d4f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -18,15 +18,13 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.function.Supplier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AsyncAppender;
 
 import org.junit.Rule;
 import org.junit.Test;
@@ -34,7 +32,6 @@
 
 import java.io.IOException;
 import java.util.Collections;
-import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -64,12 +61,12 @@
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testMetricsLoggerIsAsync() throws IOException {
     makeNameNode(true);
     org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
-    @SuppressWarnings("unchecked")
-    List<Appender> appenders = Collections.list(logger.getAllAppenders());
-    assertTrue(appenders.get(0) instanceof AsyncAppender);
+    assertTrue(Collections.list(logger.getAllAppenders()).get(0)
+        instanceof org.apache.log4j.AsyncAppender);
   }
 
   /**
@@ -80,20 +77,14 @@
   public void testMetricsLogOutput()
       throws IOException, InterruptedException, TimeoutException {
     TestFakeMetric metricsProvider = new TestFakeMetric();
-    MBeans.register(this.getClass().getSimpleName(),
-        "DummyMetrics", metricsProvider);
+    MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider);
     makeNameNode(true);     // Log metrics early and often.
-    final PatternMatchingAppender appender =
-        (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME)
-            .getAppender("PATTERNMATCHERAPPENDER");
+    LogCapturer logCapturer =
+        LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME));
 
-    // Ensure that the supplied pattern was matched.
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        return appender.isMatched();
-      }
-    }, 1000, 60000);
+    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
+        1000, 60000);
+    logCapturer.stopCapturing();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
index 073ee37..8750154 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
@@ -28,7 +28,8 @@
 import org.junit.Test;
 
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
+import org.apache.hadoop.logging.LogCapturer;
 
 public class TestNameNodeResourcePolicy {
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 67c8f3c..7ea0b24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -52,7 +52,6 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -69,12 +68,12 @@
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -524,10 +523,8 @@
         // Corrupt the md5 files in all the namedirs
         corruptFSImageMD5(true);
 
-        // Attach our own log appender so we can verify output
-        final LogVerificationAppender appender = new LogVerificationAppender();
-        final Logger logger = Logger.getRootLogger();
-        logger.addAppender(appender);
+        // Attach our own log appender so we can verify output
+        LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
         // Try to start a new cluster
         LOG.info("\n===========================================\n" +
@@ -541,10 +538,13 @@
         } catch (IOException ioe) {
           GenericTestUtils.assertExceptionContains(
               "Failed to load FSImage file", ioe);
-          int md5failures = appender.countExceptionsWithMessage(
-              " is corrupt with MD5 checksum of ");
+
+          int md5failures =
+              org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+                  " is corrupt with MD5 checksum of ");
           // Two namedirs, so should have seen two failures
           assertEquals(2, md5failures);
+          logCapturer.stopCapturing();
         }
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 0e83bec..7376237 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -43,7 +43,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -197,7 +197,7 @@
 
     // Trying to bootstrap standby should now fail since the edit
     // logs aren't available in the shared dir.
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+    LogCapturer logs = LogCapturer.captureLogs(
         LoggerFactory.getLogger(BootstrapStandby.class));
     try {
       assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 1682731..6fa979d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.Whitebox;
 import org.junit.After;
 import org.junit.Before;
@@ -143,7 +144,7 @@
         () -> (DistributedFileSystem) FileSystem.get(conf));
 
     GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG);
-    GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer
+    LogCapturer logCapture = LogCapturer
         .captureLogs(ObserverReadProxyProvider.LOG);
     try {
       dfs.access(new Path("/"), FsAction.READ);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 513f60c..3dbadca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -37,7 +37,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -48,12 +47,12 @@
 import org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ThreadUtil;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -299,39 +298,38 @@
   @Test(timeout = 30000)
   public void testCheckpointBeforeNameNodeInitializationIsComplete()
       throws Exception {
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final org.apache.log4j.Logger logger = org.apache.log4j.Logger
-        .getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
-    // Transition 2 to observer
-    cluster.transitionToObserver(2);
-    doEdits(0, 10);
-    // After a rollEditLog, Standby(nn1)'s next checkpoint would be
-    // ahead of observer(nn2).
-    nns[0].getRpcServer().rollEditLog();
+    try {
+      // Transition 2 to observer
+      cluster.transitionToObserver(2);
+      doEdits(0, 10);
+      // After a rollEditLog, Standby(nn1)'s next checkpoint would be
+      // ahead of observer(nn2).
+      nns[0].getRpcServer().rollEditLog();
 
-    NameNode nn2 = nns[2];
-    FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
+      NameNode nn2 = nns[2];
+      FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
 
-    // After standby creating a checkpoint, it will try to push the image to
-    // active and all observer, updating it's own txid to the most recent.
-    HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
-    HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
+      // After standby creating a checkpoint, it will try to push the image to
+      // active and all observers, updating its own txid to the most recent.
+      HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
+      HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
 
-    NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
-    cluster.transitionToStandby(2);
-    logger.removeAppender(appender);
+      NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
+      cluster.transitionToStandby(2);
 
-    for (LoggingEvent event : appender.getLog()) {
-      String message = event.getRenderedMessage();
-      if (message.contains("PutImage failed") &&
-          message.contains("FSImage has not been set in the NameNode.")) {
-        //Logs have the expected exception.
-        return;
+      for (String logLine : logCapturer.getOutput().split("\n")) {
+        if (logLine != null && logLine.contains("PutImage failed") && logLine.contains(
+            "FSImage has not been set in the NameNode.")) {
+          //Logs have the expected exception.
+          return;
+        }
       }
+      fail("Expected exception not present in logs.");
+    } finally {
+      logCapturer.stopCapturing();
     }
-    fail("Expected exception not present in logs.");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 58d72f1..3741bbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -93,7 +93,7 @@
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.junit.After;
@@ -1372,7 +1372,7 @@
       Path filePath = new Path("/zeroSizeFile");
       DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
       fs.setReplication(filePath, (short) 3);
-      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
+      LogCapturer logs = LogCapturer.captureLogs(
           LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class));
       fs.setStoragePolicy(filePath, "COLD");
       fs.satisfyStoragePolicy(filePath);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index 368deef..b739b25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -22,9 +22,6 @@
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
 
-# Only to be used for testing
-log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender
-
 #
 # NameNode metrics logging.
 # The default is to retain two namenode-metrics.log files up to 64MB each.
@@ -32,10 +29,10 @@
 
 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
 # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
-namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER
+namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA
 log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
 log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
+log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
 log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
 log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB
 log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log
@@ -48,10 +45,10 @@
 
 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
 # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
-datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER
+datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA
 log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
 log4j.additivity.DataNodeMetricsLog=false
-log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
+log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
 log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
 log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB
 log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log
@@ -72,7 +69,7 @@
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
 log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender
+log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender
 log4j.appender.ASYNCAUDITAPPENDER.blocking=false
 log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256
 log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index e3b3511..142c1ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -124,6 +124,12 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index 15682ee..cb5f3ed 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -36,9 +36,10 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
+
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent;
 import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
 import org.junit.After;
@@ -107,12 +108,10 @@
 import org.apache.hadoop.yarn.util.ControlledClock;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
@@ -128,29 +127,6 @@
     }
   }
 
-  private static class TestAppender extends AppenderSkeleton {
-
-    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
-
-    @Override
-    public boolean requiresLayout() {
-      return false;
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected void append(LoggingEvent arg0) {
-      logEvents.add(arg0);
-    }
-
-    private List<LoggingEvent> getLogEvents() {
-      return logEvents;
-    }
-  }
-
   @BeforeClass
   public static void setupBeforeClass() {
     ResourceUtils.resetResourceTypes(new Configuration());
@@ -1724,11 +1700,10 @@
     for (String memoryName : ImmutableList.of(
         MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
         MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
-      TestAppender testAppender = new TestAppender();
-      final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
+      final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
+      LogCapturer logCapturer = LogCapturer.captureLogs(logger);
       try {
         TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear();
-        logger.addAppender(testAppender);
         EventHandler eventHandler = mock(EventHandler.class);
         Clock clock = SystemClock.getInstance();
         JobConf jobConf = new JobConf();
@@ -1741,13 +1716,11 @@
             getResourceInfoFromContainerRequest(taImpl, eventHandler).
             getMemorySize();
         assertEquals(3072, memorySize);
-        assertTrue(testAppender.getLogEvents().stream()
-            .anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " +
-                "mapreduce.reduce.resource." + memoryName + "=3Gi is " +
-                "overriding the mapreduce.reduce.memory.mb=2048 configuration")
-                    .equals(e.getMessage())));
+        assertTrue(logCapturer.getOutput().contains(
+            "Configuration " + "mapreduce.reduce.resource." + memoryName + "=3Gi is "
+                + "overriding the mapreduce.reduce.memory.mb=2048 configuration"));
       } finally {
-        logger.removeAppender(testAppender);
+        logCapturer.stopCapturing();
       }
     }
   }
@@ -1809,10 +1782,9 @@
 
   @Test
   public void testReducerCpuRequestOverriding() {
-    TestAppender testAppender = new TestAppender();
-    final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
+    final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
+    final LogCapturer logCapturer = LogCapturer.captureLogs(logger);
     try {
-      logger.addAppender(testAppender);
       EventHandler eventHandler = mock(EventHandler.class);
       Clock clock = SystemClock.getInstance();
       JobConf jobConf = new JobConf();
@@ -1825,13 +1797,11 @@
           getResourceInfoFromContainerRequest(taImpl, eventHandler).
           getVirtualCores();
       assertEquals(7, vCores);
-      assertTrue(testAppender.getLogEvents().stream().anyMatch(
-          e -> e.getLevel() == Level.WARN && ("Configuration " +
-              "mapreduce.reduce.resource.vcores=7 is overriding the " +
-              "mapreduce.reduce.cpu.vcores=9 configuration").equals(
-                  e.getMessage())));
+      assertTrue(logCapturer.getOutput().contains(
+          "Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the "
+              + "mapreduce.reduce.cpu.vcores=9 configuration"));
     } finally {
-      logger.removeAppender(testAppender);
+      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index 7530428..d124c97 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -72,6 +72,12 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index a0223de..43ab170 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -23,12 +23,10 @@
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.Flushable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
-import java.util.Enumeration;
 import java.util.List;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
@@ -44,16 +42,13 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.util.ProcessTree;
 import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.log4j.Appender;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
@@ -276,42 +271,7 @@
     }
 
     // flush & close all appenders
-    LogManager.shutdown(); 
-  }
-
-  @SuppressWarnings("unchecked")
-  public static synchronized void syncLogs() {
-    // flush standard streams
-    //
-    System.out.flush();
-    System.err.flush();
-
-    // flush flushable appenders
-    //
-    final Logger rootLogger = Logger.getRootLogger();
-    flushAppenders(rootLogger);
-    final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
-      getCurrentLoggers();
-    while (allLoggers.hasMoreElements()) {
-      final Logger l = allLoggers.nextElement();
-      flushAppenders(l);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private static void flushAppenders(Logger l) {
-    final Enumeration<Appender> allAppenders = l.getAllAppenders();
-    while (allAppenders.hasMoreElements()) {
-      final Appender a = allAppenders.nextElement();
-      if (a instanceof Flushable) {
-        try {
-          ((Flushable) a).flush();
-        } catch (IOException ioe) {
-          System.err.println(a + ": Failed to flush!"
-            + StringUtils.stringifyException(ioe));
-        }
-      }
-    }
+    HadoopLoggerUtils.shutdownLogManager();
   }
 
   public static ScheduledExecutorService createLogSyncer() {
@@ -336,7 +296,7 @@
         new Runnable() {
           @Override
           public void run() {
-            TaskLog.syncLogs();
+            HadoopLoggerUtils.syncLogs();
           }
         }, 0L, 5L, TimeUnit.SECONDS);
     return scheduler;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
index e91b4c1..f83835f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
@@ -28,24 +28,19 @@
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
-import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.io.LineNumberReader;
-import java.io.StringReader;
 
 import org.junit.Before;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.mapred.TaskReport;
 import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.WriterAppender;
 import org.mockito.stubbing.Answer;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test to make sure that command line output for 
@@ -73,55 +68,53 @@
 
   @Test
   public void testJobMonitorAndPrint() throws Exception {
-    JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
-        0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
-        "tmp-queue", "tmp-jobfile", "tmp-url", true);
-    JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
-        1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
-        "tmp-queue", "tmp-jobfile", "tmp-url", true);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class));
+    try {
+      JobStatus jobStatus_1 =
+          new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING,
+              JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url",
+              true);
+      JobStatus jobStatus_2 =
+          new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH,
+              "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true);
 
-    doAnswer((Answer<TaskCompletionEvent[]>) invocation ->
-        TaskCompletionEvent.EMPTY_ARRAY).when(job)
-        .getTaskCompletionEvents(anyInt(), anyInt());
+      doAnswer((Answer<TaskCompletionEvent[]>) invocation -> TaskCompletionEvent.EMPTY_ARRAY).when(
+          job).getTaskCompletionEvents(anyInt(), anyInt());
 
-    doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
-    when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
-    // setup the logger to capture all logs
-    Layout layout =
-        Logger.getRootLogger().getAppender("stdout").getLayout();
-    ByteArrayOutputStream os = new ByteArrayOutputStream();
-    WriterAppender appender = new WriterAppender(layout, os);
-    appender.setThreshold(Level.ALL);
-    Logger qlogger = Logger.getLogger(Job.class);
-    qlogger.addAppender(appender);
+      doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
+      when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
 
-    job.monitorAndPrintJob();
+      job.monitorAndPrintJob();
 
-    qlogger.removeAppender(appender);
-    LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
-    String line;
-    boolean foundHundred = false;
-    boolean foundComplete = false;
-    boolean foundUber = false;
-    String uberModeMatch = "uber mode : true";
-    String progressMatch = "map 100% reduce 100%";
-    String completionMatch = "completed successfully";
-    while ((line = r.readLine()) != null) {
-      if (line.contains(uberModeMatch)) {
-        foundUber = true;
+      boolean foundHundred = false;
+      boolean foundComplete = false;
+      boolean foundUber = false;
+      String uberModeMatch = "uber mode : true";
+      String progressMatch = "map 100% reduce 100%";
+      String completionMatch = "completed successfully";
+      for (String logLine : logCapturer.getOutput().split("\n")) {
+        if (logLine.contains(uberModeMatch)) {
+          foundUber = true;
+        }
+        if (logLine.contains(progressMatch)) {
+          foundHundred = true;
+        }
+        if (logLine.contains(completionMatch)) {
+          foundComplete = true;
+        }
+        if (foundUber && foundHundred && foundComplete) {
+          break;
+        }
       }
-      foundHundred = line.contains(progressMatch);      
-      if (foundHundred)
-        break;
-    }
-    line = r.readLine();
-    foundComplete = line.contains(completionMatch);
-    assertTrue(foundUber);
-    assertTrue(foundHundred);
-    assertTrue(foundComplete);
+      assertTrue(foundUber);
+      assertTrue(foundHundred);
+      assertTrue(foundComplete);
 
-    System.out.println("The output of job.toString() is : \n" + job.toString());
-    assertTrue(job.toString().contains("Number of maps: 5\n"));
-    assertTrue(job.toString().contains("Number of reduces: 5\n"));
+      System.out.println("The output of job.toString() is : \n" + job.toString());
+      assertTrue(job.toString().contains("Number of maps: 5\n"));
+      assertTrue(job.toString().contains("Number of reduces: 5\n"));
+    } finally {
+      logCapturer.stopCapturing();
+    }
   }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 17358a3..632e972 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -128,6 +128,12 @@
       <artifactId>assertj-core</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
  <profiles>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 0bdc721..063f185 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -34,7 +34,6 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -45,7 +44,6 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
@@ -55,6 +53,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobPriority;
 import org.apache.hadoop.mapreduce.JobStatus.State;
@@ -110,13 +109,6 @@
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Level;
-import org.apache.log4j.SimpleLayout;
-import org.apache.log4j.WriterAppender;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -144,29 +136,6 @@
           MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%"));
   private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource";
 
-  private static class TestAppender extends AppenderSkeleton {
-
-    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
-
-    @Override
-    public boolean requiresLayout() {
-      return false;
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected void append(LoggingEvent arg0) {
-      logEvents.add(arg0);
-    }
-
-    private List<LoggingEvent> getLogEvents() {
-      return logEvents;
-    }
-  }
-
   private YARNRunner yarnRunner;
   private ResourceMgrDelegate resourceMgrDelegate;
   private YarnConfiguration conf;
@@ -549,38 +518,48 @@
       assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
     }
   }
+
   @Test(timeout=20000)
   public void testWarnCommandOpts() throws Exception {
-    org.apache.log4j.Logger logger =
-        org.apache.log4j.Logger.getLogger(YARNRunner.class);
-    
-    ByteArrayOutputStream bout = new ByteArrayOutputStream();
-    Layout layout = new SimpleLayout();
-    Appender appender = new WriterAppender(layout, bout);
-    logger.addAppender(appender);
-    
-    JobConf jobConf = new JobConf();
-    
-    jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
-    jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
-    
-    YARNRunner yarnRunner = new YARNRunner(jobConf);
-    
-    @SuppressWarnings("unused")
-    ApplicationSubmissionContext submissionContext =
-        buildSubmitContext(yarnRunner, jobConf);
-   
-    String logMsg = bout.toString();
-    assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + 
-    		"yarn.app.mapreduce.am.admin-command-opts can cause programs to no " +
-        "longer function if hadoop native libraries are used. These values " + 
-    		"should be set as part of the LD_LIBRARY_PATH in the app master JVM " +
-        "env using yarn.app.mapreduce.am.admin.user.env config settings."));
-    assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + 
-        "yarn.app.mapreduce.am.command-opts can cause programs to no longer " +
-        "function if hadoop native libraries are used. These values should " +
-        "be set as part of the LD_LIBRARY_PATH in the app master JVM env " +
-        "using yarn.app.mapreduce.am.env config settings."));
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
+    try {
+      JobConf jobConf = new JobConf();
+
+      jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
+          "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
+      jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
+
+      YARNRunner yarnRunner = new YARNRunner(jobConf);
+
+      @SuppressWarnings("unused")
+      ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf);
+
+      boolean isFoundOne = false;
+      boolean isFoundTwo = false;
+      for (String logLine : logCapturer.getOutput().split("\n")) {
+        if (logLine == null) {
+          continue;
+        }
+        if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
+            + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no "
+            + "longer function if hadoop native libraries are used. These values "
+            + "should be set as part of the LD_LIBRARY_PATH in the app master JVM "
+            + "env using yarn.app.mapreduce.am.admin.user.env config settings.")) {
+          isFoundOne = true;
+        }
+        if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
+            + "yarn.app.mapreduce.am.command-opts can cause programs to no longer "
+            + "function if hadoop native libraries are used. These values should "
+            + "be set as part of the LD_LIBRARY_PATH in the app master JVM env "
+            + "using yarn.app.mapreduce.am.env config settings.")) {
+          isFoundTwo = true;
+        }
+      }
+      assertTrue(isFoundOne);
+      assertTrue(isFoundTwo);
+    } finally {
+      logCapturer.stopCapturing();
+    }
   }
 
   @Test(timeout=20000)
@@ -996,10 +975,7 @@
     for (String memoryName : ImmutableList.of(
         MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
         MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
-      TestAppender testAppender = new TestAppender();
-      org.apache.log4j.Logger  logger =
-          org.apache.log4j.Logger.getLogger(YARNRunner.class);
-      logger.addAppender(testAppender);
+      LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
       try {
         JobConf jobConf = new JobConf();
         jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi");
@@ -1017,13 +993,17 @@
 
         long memorySize = resourceRequest.getCapability().getMemorySize();
         Assert.assertEquals(3072, memorySize);
-        assertTrue(testAppender.getLogEvents().stream().anyMatch(
-            e -> e.getLevel() == Level.WARN && ("Configuration " +
-                "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " +
-                "overriding the yarn.app.mapreduce.am.resource.mb=2048 " +
-                "configuration").equals(e.getMessage())));
+        boolean isLogFound = false;
+        for (String logLine : logCapturer.getOutput().split("\n")) {
+          if (logLine != null && logLine.contains("WARN") && logLine.contains(
+              "Configuration " + "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is "
+                  + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) {
+            isLogFound = true;
+          }
+        }
+        assertTrue("Log line could not be found", isLogFound);
       } finally {
-        logger.removeAppender(testAppender);
+        logCapturer.stopCapturing();
       }
     }
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
index 338f117..cc93e56 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
@@ -29,8 +29,6 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.HadoopTestCase;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.log4j.Level;
-import org.junit.Before;
 import org.junit.Test;
 
 import static org.junit.Assert.assertTrue;
@@ -76,12 +74,10 @@
                      mapJavaOpts, 
                      mapJavaOpts, MAP_OPTS_VAL);
       }
-      
-      Level logLevel = 
-        Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, 
-                               Level.INFO.toString()));  
-      assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + 
-                   logLevel, logLevel, Level.OFF);
+
+      String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO");
+      assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
+          "OFF");
     }
   }
   
@@ -108,12 +104,10 @@
                      reduceJavaOpts, 
                      reduceJavaOpts, REDUCE_OPTS_VAL);
       }
-      
-      Level logLevel = 
-        Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, 
-                               Level.INFO.toString()));  
-      assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + 
-                   logLevel, logLevel, Level.OFF);
+
+      String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO");
+      assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
+          "OFF");
     }
   }
   
@@ -127,9 +121,9 @@
       conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL);
       conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL);
     }
-    
-    conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString());
-    conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString());
+
+    conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF");
+    conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF");
     
     Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, 
                 numMaps, numReds);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
index 9e58d46..d1fc8c0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
@@ -25,6 +25,7 @@
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.Assert;
@@ -50,8 +51,6 @@
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,8 +63,7 @@
   @Test
   public void testDelegationToken() throws Exception {
 
-    org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
-    rootLogger.setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
 
     final YarnConfiguration conf = new YarnConfiguration(new JobConf());
     // Just a random principle
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index 43d3abe..f653ce7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -99,7 +99,6 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -557,9 +556,9 @@
           systemClasses);
     }
     sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
-    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
-    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
-    sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
+    sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL");
     sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
     final SleepJob sleepJob = new SleepJob();
     sleepJob.setConf(sleepConf);
@@ -856,11 +855,11 @@
 
     final SleepJob sleepJob = new SleepJob();
     final JobConf sleepConf = new JobConf(mrCluster.getConfig());
-    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
     final long userLogKb = 4;
     sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
     sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
-    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
+    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
     final long amLogKb = 7;
     sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
     sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c4dfd2f..3ebab5a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1944,6 +1944,18 @@
         <artifactId>log4j-web</artifactId>
         <version>${log4j2.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-logging</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-logging</artifactId>
+        <version>${hadoop.version}</version>
+        <scope>test</scope>
+        <type>test-jar</type>
+      </dependency>
     </dependencies>
   </dependencyManagement>
 
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index e8c5fb7..373b5a0 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -349,7 +349,12 @@
       <artifactId>hamcrest-library</artifactId>
       <scope>test</scope>
     </dependency>
-
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <profiles>
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
index 1e7330f..2a124c1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
@@ -32,7 +32,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
index 476d7a4..6acab8f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
@@ -23,7 +23,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.logging.LogCapturer;
 
 import org.junit.Test;
 import org.slf4j.Logger;
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 5194e51..06c2e19 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -83,6 +83,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
       <exclusions>
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index aa42cb9..d54fbaa 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCp;
 import org.apache.hadoop.tools.DistCpConstants;
@@ -701,8 +702,8 @@
     GenericTestUtils
         .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth());
 
-    GenericTestUtils.LogCapturer log =
-        GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);
+    LogCapturer log =
+        LogCapturer.captureLogs(SimpleCopyListing.LOG);
 
     String options = "-useiterator -update -delete" + getDefaultCLIOptions();
     DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
index 02fd48a..661573f 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
@@ -27,11 +27,10 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.tools.rumen.datatypes.*;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 
 /**
  * A default parser for MapReduce job configuration properties.
@@ -83,7 +82,7 @@
   
   // turn off the warning w.r.t deprecated mapreduce keys
   static {
-    Logger.getLogger(Configuration.class).setLevel(Level.OFF);
+    HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF");
   }
     
   // Accepts a key if there is a corresponding key in the current mapreduce
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81e8884..d901513 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -576,16 +576,6 @@
     <Bug pattern="SQL_PREPARED_STATEMENT_GENERATED_FROM_NONCONSTANT_STRING" />
   </Match>
   
-  <!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
-  <Match>
-    <Class name="org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender$Element" />
-    <Or>
-      <Field name="count" />
-      <Field name="timestampSeconds" />
-    </Or>
-    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
-  </Match>
-
   <Match>
     <Class name="org.apache.hadoop.yarn.api.records.ResourceRequest" />
     <Method name="equals" />
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index a15c78e..b41923e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -126,7 +127,6 @@
 import org.apache.hadoop.yarn.util.TimelineServiceHelper;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
-import org.apache.log4j.LogManager;
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import com.sun.jersey.api.client.ClientHandlerException;
@@ -403,7 +403,7 @@
       result = appMaster.finish();
     } catch (Throwable t) {
       LOG.error("Error running ApplicationMaster", t);
-      LogManager.shutdown();
+      HadoopLoggerUtils.shutdownLogManager();
       ExitUtil.terminate(1, t);
     } finally {
       if (appMaster != null) {
@@ -529,7 +529,7 @@
     //Check whether customer log4j.properties file exists
     if (fileExist(log4jPath)) {
       try {
-        Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class,
+        HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class,
             log4jPath);
       } catch (Exception e) {
         LOG.warn("Can not set up custom log4j properties. " + e);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 098f398..dc23682 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -451,7 +452,7 @@
     if (cliParser.hasOption("log_properties")) {
       String log4jPath = cliParser.getOptionValue("log_properties");
       try {
-        Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
+        HadoopLoggerUtils.updateLog4jConfiguration(Client.class, log4jPath);
       } catch (Exception e) {
         LOG.warn("Can not set up custom log4j properties. " + e);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
deleted file mode 100644
index 0301a68..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.applications.distributedshell;
-
-import java.io.FileInputStream;
-import java.io.InputStream;
-import java.util.Map.Entry;
-import java.util.Properties;
-
-import org.apache.log4j.LogManager;
-import org.apache.log4j.PropertyConfigurator;
-
-public class Log4jPropertyHelper {
-
-  public static void updateLog4jConfiguration(Class<?> targetClass,
-      String log4jPath) throws Exception {
-    Properties customProperties = new Properties();
-    try (
-        FileInputStream fs = new FileInputStream(log4jPath);
-        InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
-      customProperties.load(fs);
-      Properties originalProperties = new Properties();
-      originalProperties.load(is);
-      for (Entry<Object, Object> entry : customProperties.entrySet()) {
-        originalProperties.setProperty(entry.getKey().toString(), entry
-            .getValue().toString());
-      }
-      LogManager.resetConfiguration();
-      PropertyConfigurator.configure(originalProperties);
-    }
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
index 60c06e9..5534653 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
-import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletContextHandler;
@@ -52,6 +51,8 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test Spnego Client Login.
@@ -76,8 +77,7 @@
 
   private Map<String, String> props;
   private static Server server;
-  private static Logger LOG = Logger
-      .getLogger(TestSecureApiServiceClient.class);
+  private static Logger LOG = LoggerFactory.getLogger(TestSecureApiServiceClient.class);
   private ApiServiceClient asc;
 
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
index f8f948d..52ae876 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType;
-import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -49,8 +48,6 @@
  */
 public class TestComponent {
 
-  static final Logger LOG = Logger.getLogger(TestComponent.class);
-
   @Rule
   public ServiceTestUtils.ServiceFSWatcher rule =
       new ServiceTestUtils.ServiceFSWatcher();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
index fa5a587..4fc87d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
@@ -40,7 +41,6 @@
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcase;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder;
 import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder.AppDescriptor;
-import org.apache.log4j.Level;
 
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT;
 import static org.apache.hadoop.yarn.logaggregation.LogAggregationTestUtils.enableFileControllers;
@@ -67,7 +67,7 @@
 
   @BeforeAll
   public static void beforeClass() {
-    org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
   }
 
   @BeforeEach
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 346239f..0fd2841 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -28,6 +28,7 @@
 import org.slf4j.Marker;
 import org.slf4j.MarkerFactory;
 
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
index 6b0570a..c04fba0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
index 4128546..05031ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.util.GenericsUtil;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
index 87d511b..8e24e8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java
@@ -20,7 +20,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
index c849619..12b6dd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
@@ -50,11 +50,12 @@
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
-import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertEquals;
@@ -63,8 +64,7 @@
 
 public class TestContainersMonitorResourceChange {
 
-  static final Logger LOG = Logger
-      .getLogger(TestContainersMonitorResourceChange.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestContainersMonitorResourceChange.class);
   private ContainersMonitorImpl containersMonitor;
   private MockExecutor executor;
   private Configuration conf;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 9d096d2..7ea8a62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -245,6 +245,13 @@
       <scope>test</scope>
     </dependency>
 
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-logging</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index dc69eba..80cc9fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -32,7 +32,6 @@
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.log4j.Logger;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -42,6 +41,9 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * In-memory mapping between applications/container-tags and nodes/racks.
  * Required by constrained affinity/anti-affinity and cardinality placement.
@@ -50,8 +52,7 @@
 @InterfaceStability.Unstable
 public class AllocationTagsManager {
 
-  private static final Logger LOG = Logger.getLogger(
-      AllocationTagsManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AllocationTagsManager.class);
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index c17d4f6..15e2d34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 9a85315..12b017a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -231,8 +232,8 @@
     conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INITIAL_DELAY, 10);
     conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId());
 
-    GenericTestUtils.LogCapturer logCapture =
-        GenericTestUtils.LogCapturer.captureLogs(FederationStateStoreService.LOG);
+    LogCapturer logCapture =
+        LogCapturer.captureLogs(FederationStateStoreService.LOG);
 
     final MockRM rm = new MockRM(conf);
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index a1989d5..dc2d18d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,17 +28,13 @@
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
@@ -83,6 +79,7 @@
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.LoggerFactory;
 
 public class TestSystemMetricsPublisherForV2 {
 
@@ -301,42 +298,15 @@
   @Test(timeout = 10000)
   public void testPutEntityWhenNoCollector() throws Exception {
     // Validating the logs as DrainDispatcher won't throw exception
-    class TestAppender extends AppenderSkeleton {
-      private final List<LoggingEvent> log = new ArrayList<>();
-
-      @Override
-      public boolean requiresLayout() {
-        return false;
-      }
-
-      @Override
-      protected void append(final LoggingEvent loggingEvent) {
-        log.add(loggingEvent);
-      }
-
-      @Override
-      public void close() {
-      }
-
-      public List<LoggingEvent> getLog() {
-        return new ArrayList<>(log);
-      }
-    }
-
-    TestAppender appender = new TestAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
-
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     try {
       RMApp app = createRMApp(ApplicationId.newInstance(0, 1));
       metricsPublisher.appCreated(app, app.getStartTime());
       dispatcher.await();
-      for (LoggingEvent event : appender.getLog()) {
-        assertFalse("Dispatcher Crashed",
-            event.getRenderedMessage().contains("Error in dispatcher thread"));
-      }
+      assertFalse("Dispatcher Crashed",
+          logCapturer.getOutput().contains("Error in dispatcher thread"));
     } finally {
-      logger.removeAppender(appender);
+      logCapturer.stopCapturing();
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
index 2e7b01e..07630f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
@@ -18,12 +18,11 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
 import org.junit.Test;
 import java.io.IOException;
 import java.util.Map;
@@ -157,7 +156,7 @@
 
   @Test
   public void testPreemptionToBalanceWithVcoreResource() throws IOException {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
@@ -195,7 +194,7 @@
 
   @Test
   public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException {
-    Logger.getRootLogger().setLevel(Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
     String labelsConfig = "=100:100,true"; // default partition
     String nodesConfig = "n1="; // only one node
     String queuesConfig =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
index 024ec86..c6066fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -16,6 +16,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework;
 
+import org.apache.hadoop.logging.HadoopLoggerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions;
 import org.slf4j.Logger;
@@ -110,8 +111,7 @@
   public void setup() {
     resetResourceInformationMap();
 
-    org.apache.log4j.Logger.getRootLogger().setLevel(
-        org.apache.log4j.Level.DEBUG);
+    HadoopLoggerUtils.setLogLevel("root", "DEBUG");
 
     conf = new CapacitySchedulerConfiguration(new Configuration(false));
     conf.setLong(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index 6aaa15f..c5add68 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -25,9 +25,10 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
-import org.apache.log4j.Logger;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static junit.framework.TestCase.fail;
 
@@ -37,8 +38,7 @@
  * the invariant throws in case the invariants are not respected.
  */
 public class TestMetricsInvariantChecker {
-  public final static Logger LOG =
-      Logger.getLogger(TestMetricsInvariantChecker.class);
+  public final static Logger LOG = LoggerFactory.getLogger(TestMetricsInvariantChecker.class);
 
   private MetricsSystem metricsSystem;
   private MetricsInvariantChecker ic;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 38fbcd8..68bbc94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -29,19 +30,13 @@
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue;
 import static org.junit.Assert.assertEquals;
@@ -54,29 +49,6 @@
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
 
-  private static class TestAppender extends AppenderSkeleton {
-
-    private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
-
-    @Override
-    public boolean requiresLayout() {
-      return false;
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected void append(LoggingEvent arg0) {
-      logEvents.add(arg0);
-    }
-
-    private List<LoggingEvent> getLogEvents() {
-      return logEvents;
-    }
-  }
-
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
@@ -751,9 +723,7 @@
 
   @Test
   public void testMemoryIncrementConfiguredViaMultipleProperties() {
-    TestAppender testAppender = new TestAppender();
-    Logger logger = LogManager.getRootLogger();
-    logger.addAppender(testAppender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-mb", "7");
@@ -763,23 +733,19 @@
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13L, increment.getMemorySize());
-      assertTrue("Warning message is not logged when specifying memory " +
-          "increment via multiple properties",
-          testAppender.getLogEvents().stream().anyMatch(
-            e -> e.getLevel() == Level.WARN && ("Configuration " +
-              "yarn.resource-types.memory-mb.increment-allocation=13 is " +
-              "overriding the yarn.scheduler.increment-allocation-mb=7 " +
-              "property").equals(e.getMessage())));
+      assertTrue("Warning message is not logged when specifying memory "
+          + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
+          + "yarn.resource-types.memory-mb.increment-allocation=13 is "
+          + "overriding the yarn.scheduler.increment-allocation-mb=7 "
+          + "property"));
     } finally {
-      logger.removeAppender(testAppender);
+      logCapturer.stopCapturing();
     }
   }
 
   @Test
   public void testCpuIncrementConfiguredViaMultipleProperties() {
-    TestAppender testAppender = new TestAppender();
-    Logger logger = LogManager.getRootLogger();
-    logger.addAppender(testAppender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
     try {
       Configuration conf = new Configuration();
       conf.set("yarn.scheduler.increment-allocation-vcores", "7");
@@ -789,15 +755,13 @@
       FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
       Resource increment = fsc.getIncrementAllocation();
       Assert.assertEquals(13, increment.getVirtualCores());
-      assertTrue("Warning message is not logged when specifying CPU vCores " +
-          "increment via multiple properties",
-          testAppender.getLogEvents().stream().anyMatch(
-            e -> e.getLevel() == Level.WARN && ("Configuration " +
-              "yarn.resource-types.vcores.increment-allocation=13 is " +
-              "overriding the yarn.scheduler.increment-allocation-vcores=7 " +
-              "property").equals(e.getMessage())));
+      assertTrue("Warning message is not logged when specifying CPU vCores "
+          + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
+          + "yarn.resource-types.vcores.increment-allocation=13 is "
+          + "overriding the yarn.scheduler.increment-allocation-vcores=7 "
+          + "property"));
     } finally {
-      logger.removeAppender(testAppender);
+      logCapturer.stopCapturing();
     }
   }
 }