HDDS-2476. Share more code between metadata and data scanners. (#3727)
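
This change extracts the duplicated scan loop, interval/sleep handling, shutdown
logic and shared metrics fields of ContainerDataScanner and
ContainerMetadataScanner into a new AbstractContainerScanner base class (with a
matching AbstractContainerScannerMetrics base), and renames the remaining
"scrubber" names to "scanner" (ContainerScrubberConfiguration ->
ContainerScannerConfiguration, the metrics and test classes, and the findbugs
exclusion entry).

For orientation only, below is a minimal, self-contained sketch of the
template-method shape the new base class follows. It is not the patch code:
ScanLoopSketch, ExampleScanner and the String "containers" are illustrative
stand-ins for the real Ozone types, and metrics registration and logging are
omitted.

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;

/** Illustrative base class: one scan pass over all containers, then sleep. */
abstract class ScanLoopSketch extends Thread {

  private final long scanIntervalMillis;
  private volatile boolean stopping = false;

  ScanLoopSketch(String name, long scanIntervalMillis) {
    this.scanIntervalMillis = scanIntervalMillis;
    setName(name);
    setDaemon(true);
  }

  @Override
  public final void run() {
    while (!stopping) {
      runIteration();
    }
  }

  final void runIteration() {
    long start = System.nanoTime();
    Iterator<String> it = getContainerIterator();
    while (!stopping && it.hasNext()) {
      String container = it.next();
      try {
        // The subclass decides what "scan" means (metadata check vs. full
        // data check); one failing container must not stop the loop.
        scanContainer(container);
      } catch (IOException e) {
        System.err.println("scan failed for " + container + ": " + e);
      }
    }
    long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    long remaining = scanIntervalMillis - elapsed;
    if (remaining > 0) {
      try {
        Thread.sleep(remaining);
      } catch (InterruptedException e) {
        stopping = true;
        Thread.currentThread().interrupt();
      }
    }
  }

  /** Hook 1: which containers to look at in this iteration. */
  abstract Iterator<String> getContainerIterator();

  /** Hook 2: how to scan a single container. */
  abstract void scanContainer(String container) throws IOException;

  void shutdown() throws InterruptedException {
    stopping = true;
    interrupt();
    join();
  }
}

/** Illustrative subclass: the only code a concrete scanner has to provide. */
class ExampleScanner extends ScanLoopSketch {

  ExampleScanner() {
    super("ExampleScanner", TimeUnit.SECONDS.toMillis(1));
  }

  @Override
  Iterator<String> getContainerIterator() {
    return Arrays.asList("container-1", "container-2").iterator();
  }

  @Override
  void scanContainer(String container) {
    System.out.println("scanned " + container);
  }
}
```

In the patch itself the iterator element type is Container<?>, the sleep
interval comes from ContainerScannerConfiguration, and the two hooks are
implemented by ContainerDataScanner (one thread per volume, full data check)
and ContainerMetadataScanner (a single thread, metadata check only).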

diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
index 7d07e2f..bf72793 100644
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
@@ -82,7 +82,7 @@
     <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
   </Match>
   <Match>
-    <Class name="org.apache.hadoop.ozone.container.ozoneimpl.TestContainerScrubberMetrics" />
+    <Class name="org.apache.hadoop.ozone.container.ozoneimpl.TestContainerScannerMetrics"/>
     <Bug pattern="RU_INVOKE_RUN" />
   </Match>
 </FindBugsFilter>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index 46fc244..6d19a9f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -23,13 +23,13 @@
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA_DEFAULT;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.HDDS_CONTAINER_SCRUB_ENABLED;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.HDDS_CONTAINER_SCRUB_ENABLED;
 
 /**
  * Select an appropriate ChunkManager implementation as per config setting.
@@ -58,10 +58,10 @@
         HDDS_CONTAINER_PERSISTDATA_DEFAULT);
 
     if (!persist) {
-      ContainerScrubberConfiguration scrubber = conf.getObject(
-          ContainerScrubberConfiguration.class);
-      if (scrubber.isEnabled()) {
-        // Data Scrubber needs to be disabled for non-persistent chunks.
+      ContainerScannerConfiguration scannerConfig = conf.getObject(
+          ContainerScannerConfiguration.class);
+      if (scannerConfig.isEnabled()) {
+        // Data scanner needs to be disabled for non-persistent chunks.
         LOG.warn("Failed to set " + HDDS_CONTAINER_PERSISTDATA + " to false."
             + " Please set " + HDDS_CONTAINER_SCRUB_ENABLED
             + " also to false to enable non-persistent containers.");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractContainerScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractContainerScanner.java
new file mode 100644
index 0000000..7877f5a
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractContainerScanner.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Base class for scheduled scanners on a Datanode.
+ */
+public abstract class AbstractContainerScanner extends Thread {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(AbstractContainerScanner.class);
+
+  private final long dataScanInterval;
+
+  /**
+   * True if the thread is stopping.<p/>
+   * Declared volatile so the scan loop observes shutdown() promptly.
+   */
+  private volatile boolean stopping = false;
+
+  public AbstractContainerScanner(String name, long dataScanInterval) {
+    this.dataScanInterval = dataScanInterval;
+    setName(name);
+    setDaemon(true);
+  }
+
+  @Override
+  public final void run() {
+    AbstractContainerScannerMetrics metrics = getMetrics();
+    try {
+      while (!stopping) {
+        runIteration();
+        metrics.resetNumContainersScanned();
+        metrics.resetNumUnhealthyContainers();
+      }
+      LOG.info("{} exiting.", this);
+    } catch (Exception e) {
+      LOG.error("{} exiting because of exception ", this, e);
+    } finally {
+      if (metrics != null) {
+        metrics.unregister();
+      }
+    }
+  }
+
+  @VisibleForTesting
+  public final void runIteration() {
+    long startTime = System.nanoTime();
+    scanContainers();
+    long totalDuration = System.nanoTime() - startTime;
+    if (stopping) {
+      return;
+    }
+    AbstractContainerScannerMetrics metrics = getMetrics();
+    metrics.incNumScanIterations();
+    LOG.info("Completed an iteration in {} minutes." +
+            " Number of iterations (since the data-node restart) : {}" +
+            ", Number of containers scanned in this iteration : {}" +
+            ", Number of unhealthy containers found in this iteration : {}",
+        TimeUnit.NANOSECONDS.toMinutes(totalDuration),
+        metrics.getNumScanIterations(),
+        metrics.getNumContainersScanned(),
+        metrics.getNumUnHealthyContainers());
+    long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(totalDuration);
+    long remainingSleep = dataScanInterval - elapsedMillis;
+    handleRemainingSleep(remainingSleep);
+  }
+
+  public final void scanContainers() {
+    Iterator<Container<?>> itr = getContainerIterator();
+    while (!stopping && itr.hasNext()) {
+      Container<?> c = itr.next();
+      try {
+        scanContainer(c);
+      } catch (IOException ex) {
+        LOG.warn("Unexpected exception while scanning container "
+            + c.getContainerData().getContainerID(), ex);
+      }
+    }
+  }
+
+  public abstract Iterator<Container<?>> getContainerIterator();
+
+  public abstract void scanContainer(Container<?> c) throws IOException;
+
+  public final void handleRemainingSleep(long remainingSleep) {
+    if (remainingSleep > 0) {
+      try {
+        Thread.sleep(remainingSleep);
+      } catch (InterruptedException ignored) {
+        this.stopping = true;
+        LOG.warn("Background container scan was interrupted.");
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  public synchronized void shutdown() {
+    this.stopping = true;
+    this.interrupt();
+    try {
+      this.join();
+    } catch (InterruptedException ex) {
+      LOG.warn("Unexpected exception while stopping data scanner.", ex);
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  @VisibleForTesting
+  public abstract AbstractContainerScannerMetrics getMetrics();
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractContainerScannerMetrics.java
similarity index 68%
rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractContainerScannerMetrics.java
index b70a3e5..3dda31b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/AbstractContainerScannerMetrics.java
@@ -7,13 +7,15 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ *   http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
  */
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
@@ -21,17 +23,15 @@
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 
 /**
- * This class captures the container meta-data scrubber metrics on the
- * data-node.
- **/
+ * Base class for container scanner metrics.
+ */
 @InterfaceAudience.Private
-@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
-public final class ContainerMetadataScrubberMetrics {
+@Metrics(about = "Datanode container scanner metrics", context = "dfs")
+public abstract class AbstractContainerScannerMetrics {
 
   private final String name;
   private final MetricsSystem ms;
@@ -43,6 +43,11 @@
   @Metric("number of iterations of scanner completed since the restart")
   private MutableCounterInt numScanIterations;
 
+  public AbstractContainerScannerMetrics(String name, MetricsSystem ms) {
+    this.name = name;
+    this.ms = ms;
+  }
+
   public int getNumContainersScanned() {
     return numContainersScanned.value();
   }
@@ -82,17 +87,4 @@
   public String getName() {
     return name;
   }
-
-  private ContainerMetadataScrubberMetrics(String name, MetricsSystem ms) {
-    this.name = name;
-    this.ms = ms;
-  }
-
-  public static ContainerMetadataScrubberMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerMetadataScrubberMetrics";
-    return ms.register(name, null,
-        new ContainerMetadataScrubberMetrics(name, ms));
-  }
-
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
index 5e1e5a1..91f9d95 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
-import java.io.IOException;
-import java.time.Instant;
-import java.util.Iterator;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -32,11 +26,15 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Iterator;
+import java.util.Optional;
+
 /**
- * VolumeScanner scans a single volume.  Each VolumeScanner has its own thread.
- * <p>They are all managed by the DataNode's BlockScanner.
+ * Data scanner that fully checks a volume. Each volume gets a separate thread.
  */
-public class ContainerDataScanner extends Thread {
+public class ContainerDataScanner extends AbstractContainerScanner {
   public static final Logger LOG =
       LoggerFactory.getLogger(ContainerDataScanner.class);
 
@@ -47,103 +45,42 @@
   private final ContainerController controller;
   private final DataTransferThrottler throttler;
   private final Canceler canceler;
-  private final ContainerDataScrubberMetrics metrics;
-  private final long dataScanInterval;
   private static final String NAME_FORMAT = "ContainerDataScanner(%s)";
+  private final ContainerDataScannerMetrics metrics;
 
-  /**
-   * True if the thread is stopping.<p/>
-   * Protected by this object's lock.
-   */
-  private volatile boolean stopping = false;
-
-
-  public ContainerDataScanner(ContainerScrubberConfiguration conf,
+  public ContainerDataScanner(ContainerScannerConfiguration conf,
                               ContainerController controller,
                               HddsVolume volume) {
+    super(String.format(NAME_FORMAT, volume), conf.getDataScanInterval());
     this.controller = controller;
     this.volume = volume;
-    dataScanInterval = conf.getDataScanInterval();
     throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume());
     canceler = new Canceler();
-    metrics = ContainerDataScrubberMetrics.create(volume.toString());
-    setName(String.format(NAME_FORMAT, volume));
-    setDaemon(true);
+    this.metrics = ContainerDataScannerMetrics.create(volume.toString());
   }
 
   @Override
-  public void run() {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("{}: thread starting.", this);
+  public void scanContainer(Container<?> c) throws IOException {
+    if (!c.shouldScanData()) {
+      return;
     }
-    try {
-      while (!stopping) {
-        runIteration();
-        metrics.resetNumContainersScanned();
-        metrics.resetNumUnhealthyContainers();
-      }
-      LOG.info("{} exiting.", this);
-    } catch (Exception e) {
-      LOG.error("{} exiting because of exception ", this, e);
-    } finally {
-      if (metrics != null) {
-        metrics.unregister();
-      }
+    ContainerData containerData = c.getContainerData();
+    long containerId = containerData.getContainerID();
+    logScanStart(containerData);
+    if (!c.scanData(throttler, canceler)) {
+      metrics.incNumUnHealthyContainers();
+      controller.markContainerUnhealthy(containerId);
+    } else {
+      Instant now = Instant.now();
+      logScanCompleted(containerData, now);
+      controller.updateDataScanTimestamp(containerId, now);
     }
+    metrics.incNumContainersScanned();
   }
 
-  @VisibleForTesting
-  public void runIteration() {
-    long startTime = System.nanoTime();
-    Iterator<Container<?>> itr = controller.getContainers(volume);
-    while (!stopping && itr.hasNext()) {
-      Container c = itr.next();
-      if (c.shouldScanData()) {
-        ContainerData containerData = c.getContainerData();
-        long containerId = containerData.getContainerID();
-        try {
-          logScanStart(containerData);
-          if (!c.scanData(throttler, canceler)) {
-            metrics.incNumUnHealthyContainers();
-            controller.markContainerUnhealthy(containerId);
-          } else {
-            Instant now = Instant.now();
-            logScanCompleted(containerData, now);
-            controller.updateDataScanTimestamp(containerId, now);
-          }
-        } catch (IOException ex) {
-          LOG.warn("Unexpected exception while scanning container "
-              + containerId, ex);
-        } finally {
-          metrics.incNumContainersScanned();
-        }
-      }
-    }
-    long totalDuration = System.nanoTime() - startTime;
-    if (!stopping) {
-      if (metrics.getNumContainersScanned() > 0) {
-        metrics.incNumScanIterations();
-        LOG.info("Completed an iteration of container data scrubber in" +
-                " {} minutes." +
-                " Number of iterations (since the data-node restart) : {}" +
-                ", Number of containers scanned in this iteration : {}" +
-                ", Number of unhealthy containers found in this iteration : {}",
-            TimeUnit.NANOSECONDS.toMinutes(totalDuration),
-            metrics.getNumScanIterations(),
-            metrics.getNumContainersScanned(),
-            metrics.getNumUnHealthyContainers());
-      }
-      long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(totalDuration);
-      long remainingSleep = dataScanInterval - elapsedMillis;
-      if (remainingSleep > 0) {
-        try {
-          Thread.sleep(remainingSleep);
-        } catch (InterruptedException ignored) {
-          LOG.warn("Operation was interrupted.");
-          Thread.currentThread().interrupt();
-        }
-      }
-    }
+  @Override
+  public Iterator<Container<?>> getContainerIterator() {
+    return controller.getContainers(volume);
   }
 
   private static void logScanStart(ContainerData containerData) {
@@ -163,23 +100,17 @@
     }
   }
 
+  @Override
   public synchronized void shutdown() {
-    this.stopping = true;
     this.canceler.cancel(
         String.format(NAME_FORMAT, volume) + " is shutting down");
-    this.interrupt();
-    try {
-      this.join();
-    } catch (InterruptedException ex) {
-      LOG.warn("Unexpected exception while stopping data scanner for volume "
-          + volume, ex);
-      Thread.currentThread().interrupt();
-    }
+    super.shutdown();
   }
 
   @VisibleForTesting
-  public ContainerDataScrubberMetrics getMetrics() {
-    return metrics;
+  @Override
+  public ContainerDataScannerMetrics getMetrics() {
+    return this.metrics;
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java
new file mode 100644
index 0000000..a3f71d3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScannerMetrics.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * This class captures the container data scanner metrics on the data-node.
+ **/
+@InterfaceAudience.Private
+@Metrics(about = "DataNode container data scanner metrics", context = "dfs")
+public final class ContainerDataScannerMetrics
+    extends AbstractContainerScannerMetrics {
+
+  @Metric("disk bandwidth used by the container data scanner per volume")
+  private MutableRate numBytesScanned;
+
+  public double getNumBytesScannedMean() {
+    return numBytesScanned.lastStat().mean();
+  }
+
+  public long getNumBytesScannedSampleCount() {
+    return numBytesScanned.lastStat().numSamples();
+  }
+
+  public double getNumBytesScannedStdDev() {
+    return numBytesScanned.lastStat().stddev();
+  }
+
+  public void incNumBytesScanned(long bytes) {
+    numBytesScanned.add(bytes);
+  }
+
+  private ContainerDataScannerMetrics(String name, MetricsSystem ms) {
+    super(name, ms);
+  }
+
+  @SuppressWarnings("java:S2245") // no need for secure random
+  public static ContainerDataScannerMetrics create(final String volumeName) {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    String name = "ContainerDataScannerMetrics-" + (volumeName.isEmpty()
+        ? "UndefinedDataNodeVolume" + ThreadLocalRandom.current().nextInt()
+        : volumeName.replace(':', '-'));
+
+    return ms.register(name, null, new ContainerDataScannerMetrics(name, ms));
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
deleted file mode 100644
index bc62d7f..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.ozoneimpl;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterInt;
-import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * This class captures the container data scrubber metrics on the data-node.
- **/
-@InterfaceAudience.Private
-@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
-public final class ContainerDataScrubberMetrics {
-
-  private final String name;
-  private final MetricsSystem ms;
-
-  @Metric("number of containers scanned in the current iteration")
-  private MutableGaugeInt numContainersScanned;
-  @Metric("number of unhealthy containers found in the current iteration")
-  private MutableGaugeInt numUnHealthyContainers;
-  @Metric("number of iterations of scanner completed since the restart")
-  private MutableCounterInt numScanIterations;
-  @Metric("disk bandwidth used by the container data scrubber per volume")
-  private MutableRate numBytesScanned;
-
-  public int getNumContainersScanned() {
-    return numContainersScanned.value();
-  }
-
-  public void incNumContainersScanned() {
-    numContainersScanned.incr();
-  }
-
-  public void resetNumContainersScanned() {
-    numContainersScanned.decr(getNumContainersScanned());
-  }
-
-  public int getNumUnHealthyContainers() {
-    return numUnHealthyContainers.value();
-  }
-
-  public void incNumUnHealthyContainers() {
-    numUnHealthyContainers.incr();
-  }
-
-  public void resetNumUnhealthyContainers() {
-    numUnHealthyContainers.decr(getNumUnHealthyContainers());
-  }
-
-  public int getNumScanIterations() {
-    return numScanIterations.value();
-  }
-
-  public void incNumScanIterations() {
-    numScanIterations.incr();
-  }
-
-  public double getNumBytesScannedMean() {
-    return numBytesScanned.lastStat().mean();
-  }
-
-  public long getNumBytesScannedSampleCount() {
-    return numBytesScanned.lastStat().numSamples();
-  }
-
-  public double getNumBytesScannedStdDev() {
-    return numBytesScanned.lastStat().stddev();
-  }
-
-  public void incNumBytesScanned(long bytes) {
-    numBytesScanned.add(bytes);
-  }
-
-  public void unregister() {
-    ms.unregisterSource(name);
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  private ContainerDataScrubberMetrics(String name, MetricsSystem ms) {
-    this.name = name;
-    this.ms = ms;
-  }
-
-  @SuppressWarnings("java:S2245") // no need for secure random
-  public static ContainerDataScrubberMetrics create(final String volumeName) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerDataScrubberMetrics-" + (volumeName.isEmpty()
-        ? "UndefinedDataNodeVolume" + ThreadLocalRandom.current().nextInt()
-        : volumeName.replace(':', '-'));
-
-    return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms));
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
index 59657b0..b1c3e66 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
@@ -24,128 +24,44 @@
 
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.concurrent.TimeUnit;
 
 /**
  * This class is responsible to perform metadata verification of the
  * containers.
+ * A single thread is responsible for scanning all volumes.
  */
-public class ContainerMetadataScanner extends Thread {
+public class ContainerMetadataScanner extends AbstractContainerScanner {
   public static final Logger LOG =
       LoggerFactory.getLogger(ContainerMetadataScanner.class);
 
+  private final ContainerMetadataScannerMetrics metrics;
   private final ContainerController controller;
-  private final long metadataScanInterval;
-  private final ContainerMetadataScrubberMetrics metrics;
-  /**
-   * True if the thread is stopping.<p/>
-   * Protected by this object's lock.
-   */
-  private boolean stopping = false;
 
-  public ContainerMetadataScanner(ContainerScrubberConfiguration conf,
+  public ContainerMetadataScanner(ContainerScannerConfiguration conf,
                                   ContainerController controller) {
+    super("ContainerMetadataScanner", conf.getMetadataScanInterval());
     this.controller = controller;
-    this.metadataScanInterval = conf.getMetadataScanInterval();
-    this.metrics = ContainerMetadataScrubberMetrics.create();
-    setName("ContainerMetadataScanner");
-    setDaemon(true);
+    this.metrics = ContainerMetadataScannerMetrics.create();
   }
 
   @Override
-  public void run() {
-    /*
-     * the outer daemon loop exits on shutdown()
-     */
-    LOG.info("Background ContainerMetadataScanner starting up");
-    try {
-      while (!stopping) {
-        runIteration();
-        if (!stopping) {
-          metrics.resetNumUnhealthyContainers();
-          metrics.resetNumContainersScanned();
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("{} exiting because of exception ", this, e);
-    } finally {
-      if (metrics != null) {
-        metrics.unregister();
-      }
-    }
+  public Iterator<Container<?>> getContainerIterator() {
+    return controller.getContainers();
   }
 
   @VisibleForTesting
-  void runIteration() {
-    long start = System.nanoTime();
-    Iterator<Container<?>> containerIt = controller.getContainers();
-    while (!stopping && containerIt.hasNext()) {
-      Container container = containerIt.next();
-      try {
-        scrub(container);
-      } catch (IOException e) {
-        LOG.info("Unexpected error while scrubbing container {}",
-            container.getContainerData().getContainerID());
-      } finally {
-        metrics.incNumContainersScanned();
-      }
-    }
-    long interval = System.nanoTime() - start;
-    if (!stopping) {
-      metrics.incNumScanIterations();
-      LOG.info("Completed an iteration of container metadata scrubber in" +
-              " {} minutes." +
-              " Number of  iterations (since the data-node restart) : {}" +
-              ", Number of containers scanned in this iteration : {}" +
-              ", Number of unhealthy containers found in this iteration : {}",
-          TimeUnit.NANOSECONDS.toMinutes(interval),
-          metrics.getNumScanIterations(),
-          metrics.getNumContainersScanned(),
-          metrics.getNumUnHealthyContainers());
-      long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(interval);
-      long remainingSleep = metadataScanInterval - elapsedMillis;
-      if (remainingSleep > 0) {
-        try {
-          Thread.sleep(remainingSleep);
-        } catch (InterruptedException e) {
-          LOG.info("Background ContainerMetadataScanner interrupted." +
-              " Going to exit");
-          // Restore the interruption flag and the internal `stopping`
-          // variable to prevent the next iteration thus stopping the thread
-          interrupt();
-          this.stopping = true;
-        }
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public void scrub(Container container) throws IOException {
+  @Override
+  public void scanContainer(Container<?> container) throws IOException {
     if (!container.scanMetaData()) {
       metrics.incNumUnHealthyContainers();
       controller.markContainerUnhealthy(
           container.getContainerData().getContainerID());
     }
+    metrics.incNumContainersScanned();
   }
 
-  @VisibleForTesting
-  public ContainerMetadataScrubberMetrics getMetrics() {
-    return metrics;
-  }
-
-  /**
-   * Shutdown the ContainerMetadataScanner thread.
-   */
-  // Ignore the sonar false positive on the InterruptedException issue
-  // as this a normal flow of a shutdown.
-  @SuppressWarnings("squid:S2142")
-  public synchronized void shutdown() {
-    this.stopping = true;
-    this.interrupt();
-    try {
-      this.join();
-    } catch (InterruptedException ex) {
-      LOG.debug("Interrupted exception while stopping metadata scanner.", ex);
-    }
+  @Override
+  public ContainerMetadataScannerMetrics getMetrics() {
+    return this.metrics;
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScannerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScannerMetrics.java
new file mode 100644
index 0000000..d7ba5cb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScannerMetrics.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+/**
+ * This class captures the container meta-data scanner metrics on the
+ * data-node.
+ **/
+@InterfaceAudience.Private
+@Metrics(about = "DataNode container metadata scanner metrics", context = "dfs")
+public final class ContainerMetadataScannerMetrics
+    extends AbstractContainerScannerMetrics {
+
+  private ContainerMetadataScannerMetrics(String name, MetricsSystem ms) {
+    super(name, ms);
+  }
+
+  public static ContainerMetadataScannerMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    String name = "ContainerMetadataScannerMetrics";
+    return ms.register(name, null,
+        new ContainerMetadataScannerMetrics(name, ms));
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java
similarity index 92%
rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java
rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java
index 6dcefe8..4fa7773 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java
@@ -28,13 +28,13 @@
 import java.time.Duration;
 
 /**
- * This class defines configuration parameters for container scrubber.
+ * This class defines configuration parameters for the container scanners.
  **/
 @ConfigGroup(prefix = "hdds.container.scrub")
-public class ContainerScrubberConfiguration {
+public class ContainerScannerConfiguration {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerScrubberConfiguration.class);
+      LoggerFactory.getLogger(ContainerScannerConfiguration.class);
 
   // only for log
   public static final String HDDS_CONTAINER_SCRUB_ENABLED =
@@ -56,7 +56,7 @@
       type = ConfigType.BOOLEAN,
       defaultValue = "false",
       tags = {ConfigTag.STORAGE},
-      description = "Config parameter to enable container scrubber.")
+      description = "Config parameter to enable container scanner.")
   private boolean enabled = false;
 
   @Config(key = "metadata.scan.interval",
@@ -64,7 +64,7 @@
       defaultValue = "3h",
       tags = {ConfigTag.STORAGE},
       description = "Config parameter define time interval" +
-          " between two metadata scans by container scrubber." +
+          " between two metadata scans by container scanner." +
           " Unit could be defined with postfix (ns,ms,s,m,h,d).")
   private long metadataScanInterval = METADATA_SCAN_INTERVAL_DEFAULT;
 
@@ -83,7 +83,7 @@
       defaultValue = "1048576",
       tags = {ConfigTag.STORAGE},
       description = "Config parameter to throttle I/O bandwidth used"
-          + " by scrubber per volume.")
+          + " by scanner per volume.")
   private long bandwidthPerVolume = BANDWIDTH_PER_VOLUME_DEFAULT;
 
   @PostConstruct
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 45ce32d..2ceb5d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -84,7 +84,7 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.VOLUME_BYTES_PER_SECOND_KEY;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.VOLUME_BYTES_PER_SECOND_KEY;
 
 /**
  * Ozone main class sets up the network servers and initializes the container
@@ -305,8 +305,8 @@
    * Start background daemon thread for performing container integrity checks.
    */
   private void startContainerScrub() {
-    ContainerScrubberConfiguration c = config.getObject(
-        ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration c = config.getObject(
+        ContainerScannerConfiguration.class);
     boolean enabled = c.isEnabled();
 
     if (!enabled) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index b5694d7..4d8e5fa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -26,18 +26,19 @@
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import java.io.File;
 import java.io.RandomAccessFile;
+
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 
 /**
@@ -60,8 +61,8 @@
     int deletedBlocks = 1;
     int normalBlocks = 3;
     OzoneConfiguration conf = getConf();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration c = conf.getObject(
+        ContainerScannerConfiguration.class);
 
     // test Closed Container
     KeyValueContainer container = createContainerWithBlocks(containerID,
@@ -93,8 +94,8 @@
     int deletedBlocks = 1;
     int normalBlocks = 3;
     OzoneConfiguration conf = getConf();
-    ContainerScrubberConfiguration sc = conf.getObject(
-        ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration sc = conf.getObject(
+        ContainerScannerConfiguration.class);
 
     // test Closed Container
     KeyValueContainer container = createContainerWithBlocks(containerID,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java
similarity index 80%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberConfiguration.java
rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java
index af2bc80..6dcebe0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberConfiguration.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java
@@ -24,18 +24,18 @@
 
 import java.time.Duration;
 
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.BANDWIDTH_PER_VOLUME_DEFAULT;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.DATA_SCAN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.DATA_SCAN_INTERVAL_KEY;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.METADATA_SCAN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.METADATA_SCAN_INTERVAL_KEY;
-import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration.VOLUME_BYTES_PER_SECOND_KEY;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.BANDWIDTH_PER_VOLUME_DEFAULT;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.DATA_SCAN_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.DATA_SCAN_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.METADATA_SCAN_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.METADATA_SCAN_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.VOLUME_BYTES_PER_SECOND_KEY;
 import static org.junit.Assert.assertEquals;
 
 /**
- * Test for {@link ContainerScrubberConfiguration}.
+ * Test for {@link ContainerScannerConfiguration}.
  */
-public class TestContainerScrubberConfiguration {
+public class TestContainerScannerConfiguration {
 
   private OzoneConfiguration conf;
 
@@ -53,8 +53,8 @@
     conf.setLong(DATA_SCAN_INTERVAL_KEY, validInterval);
     conf.setLong(VOLUME_BYTES_PER_SECOND_KEY, validBandwidth);
 
-    ContainerScrubberConfiguration csConf =
-        conf.getObject(ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration csConf =
+        conf.getObject(ContainerScannerConfiguration.class);
 
     assertEquals(validInterval, csConf.getMetadataScanInterval());
     assertEquals(validInterval, csConf.getDataScanInterval());
@@ -70,8 +70,8 @@
     conf.setLong(DATA_SCAN_INTERVAL_KEY, invalidInterval);
     conf.setLong(VOLUME_BYTES_PER_SECOND_KEY, invalidBandwidth);
 
-    ContainerScrubberConfiguration csConf =
-        conf.getObject(ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration csConf =
+        conf.getObject(ContainerScannerConfiguration.class);
 
     assertEquals(METADATA_SCAN_INTERVAL_DEFAULT,
         csConf.getMetadataScanInterval());
@@ -83,8 +83,8 @@
 
   @Test
   public void isCreatedWitDefaultValues() {
-    ContainerScrubberConfiguration csConf =
-        conf.getObject(ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration csConf =
+        conf.getObject(ContainerScannerConfiguration.class);
 
     assertEquals(false, csConf.isEnabled());
     assertEquals(METADATA_SCAN_INTERVAL_DEFAULT,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerMetrics.java
similarity index 88%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerMetrics.java
index 74b5fe5..c76fb71 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerMetrics.java
@@ -43,10 +43,10 @@
 import static org.mockito.Mockito.when;
 
 /**
- * This test verifies the container scrubber metrics functionality.
+ * This test verifies the container scanner metrics functionality.
  */
 @RunWith(MockitoJUnitRunner.class)
-public class TestContainerScrubberMetrics {
+public class TestContainerScannerMetrics {
 
   private final AtomicLong containerIdSeq = new AtomicLong(100);
 
@@ -62,31 +62,31 @@
   @Mock
   private HddsVolume vol;
 
-  private ContainerScrubberConfiguration conf;
+  private ContainerScannerConfiguration conf;
   private ContainerController controller;
 
   @Before
   public void setup() {
-    conf = newInstanceOf(ContainerScrubberConfiguration.class);
+    conf = newInstanceOf(ContainerScannerConfiguration.class);
     conf.setMetadataScanInterval(0);
     conf.setDataScanInterval(0);
     controller = mockContainerController();
   }
 
   @Test
-  public void testContainerMetaDataScrubberMetrics() {
+  public void testContainerMetaDataScannerMetrics() {
     ContainerMetadataScanner subject =
         new ContainerMetadataScanner(conf, controller);
     subject.runIteration();
 
-    ContainerMetadataScrubberMetrics metrics = subject.getMetrics();
+    ContainerMetadataScannerMetrics metrics = subject.getMetrics();
     assertEquals(1, metrics.getNumScanIterations());
     assertEquals(3, metrics.getNumContainersScanned());
     assertEquals(1, metrics.getNumUnHealthyContainers());
   }
 
   @Test
-  public void testContainerMetaDataScrubberMetricsUnregisters() {
+  public void testContainerMetaDataScannerMetricsUnregisters() {
     ContainerMetadataScanner subject =
         new ContainerMetadataScanner(conf, controller);
     String name = subject.getMetrics().getName();
@@ -100,19 +100,19 @@
   }
 
   @Test
-  public void testContainerDataScrubberMetrics() {
+  public void testContainerDataScannerMetrics() {
     ContainerDataScanner subject =
         new ContainerDataScanner(conf, controller, vol);
     subject.runIteration();
 
-    ContainerDataScrubberMetrics metrics = subject.getMetrics();
+    ContainerDataScannerMetrics metrics = subject.getMetrics();
     assertEquals(1, metrics.getNumScanIterations());
     assertEquals(2, metrics.getNumContainersScanned());
     assertEquals(1, metrics.getNumUnHealthyContainers());
   }
 
   @Test
-  public void testContainerDataScrubberMetricsUnregisters() throws IOException {
+  public void testContainerDataScannerMetricsUnregisters() throws IOException {
     HddsVolume volume = new HddsVolume.Builder("/").failedVolume(true).build();
     ContainerDataScanner subject =
         new ContainerDataScanner(conf, controller, volume);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java
similarity index 95%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java
index 898119f..03aa39f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java
@@ -17,7 +17,7 @@
  * under the License.
  *
  */
-package org.apache.hadoop.ozone.dn.scrubber;
+package org.apache.hadoop.ozone.dn.scanner;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -26,28 +26,28 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerMetadataScanner;
-import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration;
+import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -56,30 +56,29 @@
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
+import java.io.File;
 import java.io.IOException;
 import java.time.Instant;
 import java.util.HashMap;
 import java.util.Set;
 import java.util.UUID;
-import java.io.File;
-
-import org.junit.Rule;
-import org.junit.rules.Timeout;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 
 /**
- * This class tests the data scrubber functionality.
+ * This class tests the data scanner functionality.
  */
-public class TestDataScrubber {
+public class TestDataScanner {
 
   /**
-    * Set a timeout for each test.
-    */
+   * Set a timeout for each test.
+   */
   @Rule
   public Timeout timeout = Timeout.seconds(300);
   private static MiniOzoneCluster cluster;
@@ -174,11 +173,11 @@
     deleteDirectory(chunksDir);
     Assert.assertFalse(chunksDir.exists());
 
-    ContainerScrubberConfiguration conf = ozoneConfig.getObject(
-        ContainerScrubberConfiguration.class);
+    ContainerScannerConfiguration conf = ozoneConfig.getObject(
+        ContainerScannerConfiguration.class);
     ContainerMetadataScanner sb = new ContainerMetadataScanner(conf,
         oc.getController());
-    sb.scrub(c);
+    sb.scanContainer(c);
 
     // wait for the incremental container report to propagate to SCM
     Thread.sleep(5000);