HDFS-16686. GetJournalEditServlet fails to authorize valid Kerberos request (#4724)

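The servlet derived the caller from HttpServletRequest#getUserPrincipal and
HttpServletRequest#getRemoteUser, which did not yield the expected Kerberos
identity for these requests, so valid requests could be rejected; the null
check also ran only after getUserPrincipal().getName() had already been
dereferenced. GetJournalEditServlet now extends DfsServlet and resolves the
caller through getUGI(request, conf) before matching it against the valid
requestors. TestRollingUpgrade now uses per-test temporary directories (to
avoid interactions between test runs) and try-with-resources for cluster
lifecycles, and MiniQJMHACluster implements AutoCloseable to support that
pattern.

A minimal usage sketch of the AutoCloseable cluster (illustrative only, not
part of this patch; the class name is hypothetical and the hadoop-hdfs test
classpath is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;

    public class MiniQJMHAClusterUsage {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // close() invokes shutdown() when the try block exits.
        try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build()) {
          cluster.getDfsCluster().waitActive();
        }
      }
    }
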
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
index 81b3f8c..f726ff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
@@ -27,11 +27,11 @@
 
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.text.StringEscapeUtils;
+import org.apache.hadoop.hdfs.server.namenode.DfsServlet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -64,7 +64,7 @@
  * </ul>
  */
 @InterfaceAudience.Private
-public class GetJournalEditServlet extends HttpServlet {
+public class GetJournalEditServlet extends DfsServlet {
 
   private static final long serialVersionUID = -4635891628211723009L;
   private static final Logger LOG =
@@ -77,17 +77,12 @@
 
   protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
       throws IOException {
-    String remotePrincipal = request.getUserPrincipal().getName();
-    String remoteShortName = request.getRemoteUser();
-    if (remotePrincipal == null) { // This really shouldn't happen...
-      LOG.warn("Received null remoteUser while authorizing access to " +
-          "GetJournalEditServlet");
-      return false;
-    }
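+    // Resolve the caller via DfsServlet#getUGI instead of the raw servlet principal.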
+    UserGroupInformation ugi = getUGI(request, conf);
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Validating request made by " + remotePrincipal +
-          " / " + remoteShortName + ". This user is: " +
+      LOG.debug("Validating request made by " + ugi.getUserName() +
+          " / " + ugi.getShortUserName() + ". This user is: " +
           UserGroupInformation.getLoginUser());
     }
 
@@ -115,9 +110,9 @@
     for (String v : validRequestors) {
       if (LOG.isDebugEnabled())
         LOG.debug("isValidRequestor is comparing to valid requestor: " + v);
-      if (v != null && v.equals(remotePrincipal)) {
+      if (v != null && v.equals(ugi.getUserName())) {
         if (LOG.isDebugEnabled())
-          LOG.debug("isValidRequestor is allowing: " + remotePrincipal);
+          LOG.debug("isValidRequestor is allowing: " + ugi.getUserName());
         return true;
       }
     }
@@ -125,16 +120,16 @@
     // Additionally, we compare the short name of the requestor to this JN's
     // username, because we want to allow requests from other JNs during
     // recovery, but we can't enumerate the full list of JNs.
-    if (remoteShortName.equals(
+    if (ugi.getShortUserName().equals(
           UserGroupInformation.getLoginUser().getShortUserName())) {
       if (LOG.isDebugEnabled())
         LOG.debug("isValidRequestor is allowing other JN principal: " +
-            remotePrincipal);
+            ugi.getUserName());
       return true;
     }
 
     if (LOG.isDebugEnabled())
-      LOG.debug("isValidRequestor is rejecting: " + remotePrincipal);
+      LOG.debug("isValidRequestor is rejecting: " + ugi.getUserName());
     return false;
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index c428d20..6e7014c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -33,6 +33,8 @@
 import javax.management.ReflectionException;
 import javax.management.openmbean.CompositeDataSupport;
 
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -80,16 +82,38 @@
     }
   }
 
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  /**
+   * Create a default HDFS configuration which has test-specific data directories.  This is
+   * intended to protect against interactions between test runs that might corrupt results.  Each
+   * test run's data is automatically cleaned up by JUnit.
+   *
+   * @return a default configuration with test-specific data directories
+   */
+  public Configuration getHdfsConfiguration() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+
+    // Override the file system locations with test-specific temporary folders
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        folder.newFolder("dfs/name").toString());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+        folder.newFolder("dfs/namesecondary").toString());
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        folder.newFolder("dfs/data").toString());
+
+    return conf;
+  }
+
   /**
    * Test DFSAdmin Upgrade Command.
    */
   @Test
   public void testDFSAdminRollingUpgradeCommands() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
       cluster.waitActive();
 
       final Path foo = new Path("/foo");
@@ -149,8 +173,6 @@
         Assert.assertTrue(dfs.exists(bar));
         Assert.assertTrue(dfs.exists(baz));
       }
-    } finally {
-      if(cluster != null) cluster.shutdown();
     }
   }
 
@@ -172,115 +194,116 @@
     LOG.info("nn1Dir=" + nn1Dir);
     LOG.info("nn2Dir=" + nn2Dir);
 
-    final Configuration conf = new HdfsConfiguration();
-    final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
-    mjc.waitActive();
-    setConf(conf, nn1Dir, mjc);
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build()) {
+      mjc.waitActive();
+      setConf(conf, nn1Dir, mjc);
 
-    {
-      // Start the cluster once to generate the dfs dirs
-      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .manageNameDfsDirs(false)
-        .checkExitOnShutdown(false)
-        .build();
-      // Shutdown the cluster before making a copy of the namenode dir to release
-      // all file locks, otherwise, the copy will fail on some platforms.
-      cluster.shutdown();
-    }
-
-    MiniDFSCluster cluster2 = null;
-    try {
-      // Start a second NN pointed to the same quorum.
-      // We need to copy the image dir from the first NN -- or else
-      // the new NN will just be rejected because of Namespace mismatch.
-      FileUtil.fullyDelete(nn2Dir);
-      FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
-          new Path(nn2Dir.getAbsolutePath()), false, conf);
-
-      // Start the cluster again
-      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0)
-        .format(false)
-        .manageNameDfsDirs(false)
-        .checkExitOnShutdown(false)
-        .build();
-
-      final Path foo = new Path("/foo");
-      final Path bar = new Path("/bar");
-      final Path baz = new Path("/baz");
-
-      final RollingUpgradeInfo info1;
       {
-        final DistributedFileSystem dfs = cluster.getFileSystem();
-        dfs.mkdirs(foo);
-
-        //start rolling upgrade
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-        info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
-        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-        LOG.info("START\n" + info1);
-
-        //query rolling upgrade
-        assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
-
-        dfs.mkdirs(bar);
+        // Start the cluster once to generate the dfs dirs
+        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+            .numDataNodes(0)
+            .manageNameDfsDirs(false)
+            .checkExitOnShutdown(false)
+            .build();
+        // Shut down the cluster before making a copy of the namenode dir to release
+        // all file locks, otherwise, the copy will fail on some platforms.
         cluster.shutdown();
       }
 
-      // cluster2 takes over QJM
-      final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc);
-      cluster2 = new MiniDFSCluster.Builder(conf2)
-        .numDataNodes(0)
-        .format(false)
-        .manageNameDfsDirs(false)
-        .build();
-      final DistributedFileSystem dfs2 = cluster2.getFileSystem();
-
-      // Check that cluster2 sees the edits made on cluster1
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertFalse(dfs2.exists(baz));
-
-      //query rolling upgrade in cluster2
-      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
-
-      dfs2.mkdirs(baz);
-
-      LOG.info("RESTART cluster 2");
-      cluster2.restartNameNode();
-      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertTrue(dfs2.exists(baz));
-
-      //restart cluster with -upgrade should fail.
+      MiniDFSCluster cluster2 = null;
       try {
-        cluster2.restartNameNode("-upgrade");
-      } catch(IOException e) {
-        LOG.info("The exception is expected.", e);
+        // Start a second NN pointed to the same quorum.
+        // We need to copy the image dir from the first NN -- or else
+        // the new NN will just be rejected because of Namespace mismatch.
+        FileUtil.fullyDelete(nn2Dir);
+        FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
+            new Path(nn2Dir.getAbsolutePath()), false, conf);
+
+        // Start the cluster again
+        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+            .numDataNodes(0)
+            .format(false)
+            .manageNameDfsDirs(false)
+            .checkExitOnShutdown(false)
+            .build();
+
+        final Path foo = new Path("/foo");
+        final Path bar = new Path("/bar");
+        final Path baz = new Path("/baz");
+
+        final RollingUpgradeInfo info1;
+        {
+          final DistributedFileSystem dfs = cluster.getFileSystem();
+          dfs.mkdirs(foo);
+
+          //start rolling upgrade
+          dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+          info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
+          dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+          LOG.info("START\n" + info1);
+
+          //query rolling upgrade
+          assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
+
+          dfs.mkdirs(bar);
+          cluster.shutdown();
+        }
+
+        // cluster2 takes over QJM
+        final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc);
+        cluster2 = new MiniDFSCluster.Builder(conf2)
+            .numDataNodes(0)
+            .format(false)
+            .manageNameDfsDirs(false)
+            .build();
+        final DistributedFileSystem dfs2 = cluster2.getFileSystem();
+
+        // Check that cluster2 sees the edits made on cluster1
+        Assert.assertTrue(dfs2.exists(foo));
+        Assert.assertTrue(dfs2.exists(bar));
+        Assert.assertFalse(dfs2.exists(baz));
+
+        //query rolling upgrade in cluster2
+        assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+
+        dfs2.mkdirs(baz);
+
+        LOG.info("RESTART cluster 2");
+        cluster2.restartNameNode();
+        assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+        Assert.assertTrue(dfs2.exists(foo));
+        Assert.assertTrue(dfs2.exists(bar));
+        Assert.assertTrue(dfs2.exists(baz));
+
+        //restart cluster with -upgrade should fail.
+        try {
+          cluster2.restartNameNode("-upgrade");
+        } catch (IOException e) {
+          LOG.info("The exception is expected.", e);
+        }
+
+        LOG.info("RESTART cluster 2 again");
+        cluster2.restartNameNode();
+        assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+        Assert.assertTrue(dfs2.exists(foo));
+        Assert.assertTrue(dfs2.exists(bar));
+        Assert.assertTrue(dfs2.exists(baz));
+
+        //finalize rolling upgrade
+        final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
+            RollingUpgradeAction.FINALIZE);
+        Assert.assertTrue(finalize.isFinalized());
+
+        LOG.info("RESTART cluster 2 with regular startup option");
+        cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
+        cluster2.restartNameNode();
+        Assert.assertTrue(dfs2.exists(foo));
+        Assert.assertTrue(dfs2.exists(bar));
+        Assert.assertTrue(dfs2.exists(baz));
+      } finally {
+        if (cluster2 != null) cluster2.shutdown();
       }
-
-      LOG.info("RESTART cluster 2 again");
-      cluster2.restartNameNode();
-      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertTrue(dfs2.exists(baz));
-
-      //finalize rolling upgrade
-      final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
-          RollingUpgradeAction.FINALIZE);
-      Assert.assertTrue(finalize.isFinalized());
-
-      LOG.info("RESTART cluster 2 with regular startup option");
-      cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
-      cluster2.restartNameNode();
-      Assert.assertTrue(dfs2.exists(foo));
-      Assert.assertTrue(dfs2.exists(bar));
-      Assert.assertTrue(dfs2.exists(baz));
-    } finally {
-      if (cluster2 != null) cluster2.shutdown();
     }
   }
 
@@ -309,10 +332,8 @@
   @Test
   public void testRollback() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
       cluster.waitActive();
 
       final Path foo = new Path("/foo");
@@ -352,8 +373,6 @@
 
       startRollingUpgrade(foo, bar, file, data, cluster);
       rollbackRollingUpgrade(foo, bar, file, data, cluster);
-    } finally {
-      if(cluster != null) cluster.shutdown();
     }
   }
 
@@ -407,10 +426,8 @@
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
       cluster.waitActive();
       final DFSAdmin dfsadmin = new DFSAdmin(conf);
       DataNode dn = cluster.getDataNodes().get(0);
@@ -431,8 +448,6 @@
 
       // ping should fail.
       assertEquals(-1, dfsadmin.run(args1));
-    } finally {
-      if (cluster != null) cluster.shutdown();
     }
   }
 
@@ -462,7 +477,7 @@
 
   private void testFinalize(int nnCount, boolean skipImageDeltaCheck)
       throws Exception {
-    final Configuration conf = new HdfsConfiguration();
+    final Configuration conf = getHdfsConfiguration();
     MiniQJMHACluster cluster = null;
     final Path foo = new Path("/foo");
     final Path bar = new Path("/bar");
@@ -528,10 +543,8 @@
   }
 
   private void testQuery(int nnCount) throws Exception{
-    final Configuration conf = new Configuration();
-    MiniQJMHACluster cluster = null;
-    try {
-      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build()) {
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
 
@@ -561,19 +574,13 @@
       // The NN should have a copy of the fsimage in case of rollbacks.
       Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage()
               .hasRollbackFSImage());
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
 
   @Test (timeout = 300000)
   public void testQueryAfterRestart() throws IOException, InterruptedException {
-    final Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
       cluster.waitActive();
       DistributedFileSystem dfs = cluster.getFileSystem();
 
@@ -587,10 +594,6 @@
 
       cluster.restartNameNodes();
       dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
 
@@ -606,7 +609,7 @@
 
   @Test(timeout = 60000)
   public void testRollBackImage() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = getHdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 2);
@@ -651,15 +654,14 @@
   }
 
   public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
-    final Configuration conf = new Configuration();
+    final Configuration conf = getHdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
 
-    MiniQJMHACluster cluster = null;
     final Path foo = new Path("/foo");
 
-    try {
-      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
+    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount)
+        .build()) {
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
 
@@ -681,17 +683,14 @@
         verifyNNCheckpoint(dfsCluster, txid, i);
       }
 
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
 
   /**
    * Verify that the namenode at the given index has an FSImage with a TxId up to txid-1
    */
-  private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex) throws InterruptedException {
+  private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex)
+      throws InterruptedException {
     int retries = 0;
     while (++retries < 5) {
       NNStorage storage = dfsCluster.getNamesystem(nnIndex).getFSImage()
@@ -732,7 +731,7 @@
     SecondaryNameNode snn = null;
 
     try {
-      Configuration conf = new HdfsConfiguration();
+      Configuration conf = getHdfsConfiguration();
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 3ece3d7..0791e0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -35,7 +35,7 @@
 import java.util.List;
 import java.util.Random;
 
-public class MiniQJMHACluster {
+public class MiniQJMHACluster implements AutoCloseable {
   private MiniDFSCluster cluster;
   private MiniJournalCluster journalCluster;
   private final Configuration conf;
@@ -189,4 +189,15 @@
     cluster.shutdown();
     journalCluster.shutdown();
   }
+
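+  // Allows use in try-with-resources; shutdown failures are logged, not thrown.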
+  @Override
+  public void close() {
+    try {
+      shutdown();
+    } catch (IOException shutdownFailure) {
+      LOG.warn("Exception while closing journal cluster", shutdownFailure);
+    }
+
+  }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java
new file mode 100644
index 0000000..7d9dea0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestGetJournalEditServlet {
+
+  private static final Configuration CONF = new HdfsConfiguration();
+
+  private static final GetJournalEditServlet SERVLET = new GetJournalEditServlet();
+
+  @BeforeClass
+  public static void setUp() throws ServletException {
+    // Configure Hadoop
+    CONF.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
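+    // Map nn/dn/sn/jn service principals in REALM.TLD to the local user "hdfs".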
+    CONF.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL,
+        "RULE:[2:$1/$2@$0]([nsdj]n/.*@REALM\\.TLD)s/.*/hdfs/\nDEFAULT");
+    CONF.set(DFSConfigKeys.DFS_NAMESERVICES, "ns");
+    CONF.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "nn/_HOST@REALM.TLD");
+
+    // Configure Kerberos UGI
+    UserGroupInformation.setConfiguration(CONF);
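+    // The login user stands in for this JournalNode's own identity; the
+    // short-name fallback in isValidRequestor compares against it.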
+    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(
+        "jn/somehost@REALM.TLD"));
+
+    // Initialize the servlet
+    ServletConfig config = mock(ServletConfig.class);
+    SERVLET.init(config);
+  }
+
+  /**
+   * Unauthenticated user should be rejected.
+   *
+   * @throws IOException for unexpected validation failures
+   */
+  @Test
+  public void testWithoutUser() throws IOException {
+    // Test: Make a request without specifying a user
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);
+
+    // Verify: The request is invalid
+    assertThat(isValid).isFalse();
+  }
+
+  /**
+   * Namenode requests should be authorized, since the principal matches the configured namenode.
+   *
+   * @throws IOException for unexpected validation failures
+   */
+  @Test
+  public void testRequestNameNode() throws IOException {
+    // Test: Make a request from a namenode
+    HttpServletRequest request = mock(HttpServletRequest.class);
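+    // DfsServlet#getUGI resolves the caller from the user.name query parameter.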
+    when(request.getParameter(UserParam.NAME)).thenReturn("nn/localhost@REALM.TLD");
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);
+
+    assertThat(isValid).isTrue();
+  }
+
+  /**
+   * Journalnode requests should be authorized via the short-name fallback used between JNs.
+   *
+   * @throws IOException for unexpected validation failures
+   */
+  @Test
+  public void testRequestShortName() throws IOException {
+    // Test: Make a request from a journalnode
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getParameter(UserParam.NAME)).thenReturn("jn/localhost@REALM.TLD");
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);
+
+    assertThat(isValid).isTrue();
+  }
+
+}
\ No newline at end of file