HDDS-7163. Ozone debug container CLI supports container export. (#3715)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 0e1414f..0d47093 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -68,9 +68,9 @@
    * @throws IOException
    */
   public static void initPerDiskDBStore(String containerDBPath,
-      ConfigurationSource conf) throws IOException {
+      ConfigurationSource conf, boolean readOnly) throws IOException {
     DatanodeStore store = BlockUtils.getUncachedDatanodeStore(containerDBPath,
-        OzoneConsts.SCHEMA_V3, conf, false);
+        OzoneConsts.SCHEMA_V3, conf, readOnly);
     BlockUtils.addDB(store, containerDBPath, conf, OzoneConsts.SCHEMA_V3);
   }
 
@@ -81,7 +81,7 @@
    * @param logger
    */
   public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
-      MutableVolumeSet dbVolumeSet, Logger logger) {
+      MutableVolumeSet dbVolumeSet, boolean readOnly, Logger logger) {
     // Scan subdirs under the db volumes and build a one-to-one map
     // between each HddsVolume -> DbVolume.
     mapDbVolumesToDataVolumesIfNeeded(hddsVolumeSet, dbVolumeSet);
@@ -89,7 +89,7 @@
     for (HddsVolume volume : StorageVolumeUtil.getHddsVolumesList(
         hddsVolumeSet.getVolumesList())) {
       try {
-        volume.loadDbStore();
+        volume.loadDbStore(readOnly);
       } catch (IOException e) {
         onFailure(volume);
         if (logger != null) {
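
For context: the new readOnly flag threads straight through to BlockUtils.getUncachedDatanodeStore, so tooling can attach to a per-disk schema V3 RocksDB without taking its write lock. A minimal sketch of opening a single store for inspection, assuming a prepared OzoneConfiguration; the class name and DB path are illustrative, not part of this patch:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;

    public final class ReadOnlyDbProbe {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // e.g. .../<cluster-id>/<storage-id>/container.db
        String containerDBPath = args[0];
        // readOnly = true: open the store without write access and
        // register it in the datanode DB cache via BlockUtils.addDB.
        HddsVolumeUtil.initPerDiskDBStore(containerDBPath, conf, true);
      }
    }
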
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 3354671..100e3da 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -216,7 +216,7 @@
     return dbLoaded.get();
   }
 
-  public void loadDbStore() throws IOException {
+  public void loadDbStore(boolean readOnly) throws IOException {
     // DN startup for the first time, not registered yet,
     // so the DbVolume is not formatted.
     if (!getStorageState().equals(VolumeState.NORMAL)) {
@@ -252,7 +252,7 @@
 
     String containerDBPath = containerDBFile.getAbsolutePath();
     try {
-      initPerDiskDBStore(containerDBPath, getConf());
+      initPerDiskDBStore(containerDBPath, getConf(), readOnly);
     } catch (IOException e) {
       throw new IOException("Can't init db instance under path "
           + containerDBPath + " for volume " + getStorageID(), e);
@@ -305,7 +305,7 @@
     String containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME)
         .getAbsolutePath();
     try {
-      HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf());
+      HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf(), false);
       dbLoaded.set(true);
       LOG.info("SchemaV3 db is created and loaded at {} for volume {}",
           containerDBPath, getStorageID());
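
Note the asymmetry between the two hunks above: loadDbStore() now takes the caller's readOnly choice, while createDbStore() keeps hard-coding false, since a store that is being created and formatted has to be writable. A sketch of the two call paths after this patch (the wrapper class and method names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    final class DbOpenModes {
      // Debug tooling (ozone debug container): attach read-only so a
      // concurrently running datanode keeps exclusive write access.
      static void openForDebug(HddsVolume volume) throws IOException {
        volume.loadDbStore(true);
      }

      // Normal datanode startup and upgrade finalization: read-write,
      // matching the behavior before this patch.
      static void openForDatanode(HddsVolume volume) throws IOException {
        volume.loadDbStore(false);
      }
    }
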
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index d2a9480..45ce32d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -148,7 +148,8 @@
         new MutableVolumeSet(datanodeDetails.getUuidString(), conf,
             context, VolumeType.DB_VOLUME, volumeChecker);
     if (SchemaV3.isFinalizedAndEnabled(config)) {
-      HddsVolumeUtil.loadAllHddsVolumeDbStore(volumeSet, dbVolumeSet, LOG);
+      HddsVolumeUtil.loadAllHddsVolumeDbStore(
+          volumeSet, dbVolumeSet, false, LOG);
     }
 
     long recoveringContainerTimeout = config.getTimeDuration(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV3FinalizeAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV3FinalizeAction.java
index 7436284..080fcba 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV3FinalizeAction.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DatanodeSchemaV3FinalizeAction.java
@@ -76,7 +76,8 @@
       LOG.info("Schema V3 is disabled. Won't load RocksDB in upgrade.");
       return;
     }
-    HddsVolumeUtil.loadAllHddsVolumeDbStore(dataVolumeSet, dbVolumeSet, LOG);
+    HddsVolumeUtil.loadAllHddsVolumeDbStore(
+        dataVolumeSet, dbVolumeSet, false, LOG);
   }
 }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
index de3fc3d..3a20e22 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
@@ -104,7 +104,7 @@
 
     // Reinitialize all the volumes to simulate a DN restart.
     reinitVolumes();
-    HddsVolumeUtil.loadAllHddsVolumeDbStore(hddsVolumeSet, null, null);
+    HddsVolumeUtil.loadAllHddsVolumeDbStore(hddsVolumeSet, null, false, null);
 
     for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList(
         hddsVolumeSet.getVolumesList())) {
@@ -136,7 +136,8 @@
 
     // Reinitialize all the volumes to simulate a DN restart.
     reinitVolumes();
-    HddsVolumeUtil.loadAllHddsVolumeDbStore(hddsVolumeSet, dbVolumeSet, null);
+    HddsVolumeUtil.loadAllHddsVolumeDbStore(
+        hddsVolumeSet, dbVolumeSet, false, null);
 
     for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList(
         hddsVolumeSet.getVolumesList())) {
@@ -191,7 +192,8 @@
     reinitVolumes();
     assertEquals(1, dbVolumeSet.getFailedVolumesList().size());
     assertEquals(VOLUMNE_NUM - 1, dbVolumeSet.getVolumesList().size());
-    HddsVolumeUtil.loadAllHddsVolumeDbStore(hddsVolumeSet, dbVolumeSet, null);
+    HddsVolumeUtil.loadAllHddsVolumeDbStore(
+        hddsVolumeSet, dbVolumeSet, false, null);
 
     int affectedVolumeCount = 0;
 
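The updated tests above keep exercising the read-write path (readOnly = false). A hedged sketch of a companion case for the new read-only path, reusing the reinitVolumes() and hddsVolumeSet helpers visible in this file; isDbLoaded() is assumed to be the HddsVolume accessor whose body appears at the top of the HddsVolume.java diff:

    @Test
    public void testLoadAllHddsVolumeDbStoreReadOnly() throws Exception {
      // Simulate a DN restart, then load every per-volume DB read-only,
      // the way the debug CLI now does.
      reinitVolumes();
      HddsVolumeUtil.loadAllHddsVolumeDbStore(hddsVolumeSet, null, true, null);

      for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList(
          hddsVolumeSet.getVolumesList())) {
        // Loading must succeed even without write access to the DB.
        assertTrue(hddsVolume.isDbLoaded());
      }
    }
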
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
index 60f8750..ea7b102 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java
@@ -35,12 +35,14 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.ozone.debug.OzoneDebug;
 import org.kohsuke.MetaInfServices;
 import org.slf4j.Logger;
@@ -121,6 +123,17 @@
     volumeSet = new MutableVolumeSet(datanodeUuid, conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
 
+    if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) {
+      MutableVolumeSet dbVolumeSet =
+          HddsServerUtil.getDatanodeDbDirs(conf).isEmpty() ? null :
+          new MutableVolumeSet(datanodeUuid, conf, null,
+              StorageVolume.VolumeType.DB_VOLUME, null);
+      // Load RocksDB in read-only mode; a read-write open would fail
+      // while a running datanode holds the DB lock.
+      HddsVolumeUtil.loadAllHddsVolumeDbStore(
+          volumeSet, dbVolumeSet, true, LOG);
+    }
+
     Map<ContainerProtos.ContainerType, Handler> handlers = new HashMap<>();
 
     for (ContainerProtos.ContainerType containerType
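
The block added above mirrors the datanode startup path in OzoneContainer (earlier in this patch), but with readOnly = true: the debug CLI may run beside a live datanode, and only one process can hold a RocksDB instance read-write. A sketch of the failure mode this avoids, using the getUncachedDatanodeStore signature shown in HddsVolumeUtil.java; dbPath is illustrative and the import paths are assumed from the current tree:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
    import org.apache.hadoop.ozone.container.metadata.DatanodeStore;

    final class ReadOnlyAttach {
      // A live datanode is assumed to hold the read-write handle on dbPath;
      // passing readOnly = false here would throw an IOException instead.
      static DatanodeStore attach(String dbPath, OzoneConfiguration conf)
          throws java.io.IOException {
        return BlockUtils.getUncachedDatanodeStore(
            dbPath, OzoneConsts.SCHEMA_V3, conf, true);
      }
    }
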
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java
index 67ebb15..0a00959 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.debug.container;
 
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource;
 import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
 import org.slf4j.Logger;
@@ -30,6 +31,8 @@
 import java.io.FileOutputStream;
 import java.util.concurrent.Callable;
 
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND;
+
 /**
  * Handles {@code ozone debug container export} command.
  */
@@ -51,9 +54,13 @@
 
   @CommandLine.Option(names = {"--dest"},
       defaultValue = "/tmp",
-      description = "Destination directory")
+      description = "Destination directory to hold exported container files")
   private String destination;
 
+  @CommandLine.Option(names = {"--count"},
+      description = "Count of containers to export")
+  private long containerCount = 1;
+
   @Override
   public Void call() throws Exception {
     parent.loadContainersFromVolumes();
@@ -61,18 +68,26 @@
     final ContainerReplicationSource replicationSource =
         new OnDemandContainerReplicationSource(parent.getController());
 
-    LOG.info("Starting to replication");
-
-    replicationSource.prepare(containerId);
-    LOG.info("Preparation is done");
-
-    final File destinationFile =
-        new File(destination, "container-" + containerId + ".tar.gz");
-    try (FileOutputStream fos = new FileOutputStream(destinationFile)) {
-      replicationSource.copyData(containerId, fos);
+    for (int i = 0; i < containerCount; i++) {
+      replicationSource.prepare(containerId);
+      final File destinationFile =
+          new File(destination, "container-" + containerId + ".tar.gz");
+      try (FileOutputStream fos = new FileOutputStream(destinationFile)) {
+        try {
+          replicationSource.copyData(containerId, fos);
+        } catch (StorageContainerException e) {
+          if (e.getResult() == CONTAINER_NOT_FOUND) {
+            // Skip container IDs that are absent on this datanode.
+            containerId++;
+            continue;
+          }
+          // Rethrow other failures rather than logging a false success.
+          throw e;
+        }
+      }
+      LOG.info("Container {} is exported to {}", containerId, destinationFile);
+      containerId++;
     }
-    LOG.info("Container is exported to {}", destinationFile);
-
     return null;
   }
 }
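
Taken together, the loop above turns the subcommand into a bulk exporter: assuming the container ID option declared just above the visible hunk is --container, a run such as "ozone debug container export --container 10 --count 5 --dest /tmp" attempts IDs 10 through 14 and writes a container-<id>.tar.gz for each one present, skipping IDs absent on this datanode. The per-container primitive it repeats, as a standalone sketch (controller comes from parent.getController() as in the code above):

    import java.io.File;
    import java.io.FileOutputStream;
    import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
    import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource;
    import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;

    final class OneContainerExport {
      // Stream a single container to <destDir>/container-<id>.tar.gz,
      // the unit of work performed by each iteration of the loop above.
      static File export(ContainerController controller, long containerId,
          File destDir) throws Exception {
        ContainerReplicationSource source =
            new OnDemandContainerReplicationSource(controller);
        source.prepare(containerId);
        File dest = new File(destDir, "container-" + containerId + ".tar.gz");
        try (FileOutputStream fos = new FileOutputStream(dest)) {
          source.copyData(containerId, fos);
        }
        return dest;
      }
    }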