HDFS-16205. Make hdfs_allowSnapshot tool cross platform (#3388)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
index b8f06e37..d7d20ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt
@@ -67,6 +67,7 @@
 
 add_subdirectory(x-platform)
 add_subdirectory(utils)
+add_subdirectory(tools)
 
 add_executable(uri_test uri_test.cc)
 target_link_libraries(uri_test common gmock_main ${CMAKE_THREAD_LIBS_INIT})
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt
new file mode 100644
index 0000000..421a66d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/CMakeLists.txt
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+add_executable(hdfs_allowSnapshot_test hdfs-allow-snapshot-mock.cc hdfs-tool-test.cc main.cc)
+target_include_directories(hdfs_allowSnapshot_test PRIVATE ../tools ../../tools ../../tools/hdfs-allow-snapshot)
+target_link_libraries(hdfs_allowSnapshot_test PRIVATE gmock_main hdfs_allowSnapshot_lib)
+add_test(hdfs_allowSnapshot_test hdfs_allowSnapshot_test)
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-allow-snapshot-mock.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-allow-snapshot-mock.cc
new file mode 100644
index 0000000..bbfecef
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-allow-snapshot-mock.cc
@@ -0,0 +1,33 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include "hdfs-allow-snapshot-mock.h"
+
+namespace hdfs::tools::test {
+bool AllowSnapshotMock::HandleHelp() const { return true; }
+
+bool AllowSnapshotMock::HandlePath(const std::string &path) const {
+  EXPECT_STREQ(path.c_str(), "a/b/c") << "Expecting the path a/b/c here";
+  return true;
+}
+} // namespace hdfs::tools::test
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-allow-snapshot-mock.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-allow-snapshot-mock.h
new file mode 100644
index 0000000..c080a00
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-allow-snapshot-mock.h
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFSPP_TOOLS_HDFS_ALLOW_SNAPSHOT_MOCK
+#define LIBHDFSPP_TOOLS_HDFS_ALLOW_SNAPSHOT_MOCK
+
+#include <string>
+
+#include "hdfs-allow-snapshot.h"
+
+namespace hdfs::tools::test {
+/**
+ * {@class AllowSnapshotMock} is an {@class AllowSnapshot} child class that
+ * mocks the HandleHelp and HandlePath methods for testing their functionality.
+ */
+class AllowSnapshotMock : public hdfs::tools::AllowSnapshot {
+public:
+  /**
+   * {@inheritdoc}
+   */
+  AllowSnapshotMock(const int argc, char **argv) : AllowSnapshot(argc, argv) {}
+
+  // Abiding by the Rule of 5
+  AllowSnapshotMock(const AllowSnapshotMock &) = default;
+  AllowSnapshotMock(AllowSnapshotMock &&) = default;
+  AllowSnapshotMock &operator=(const AllowSnapshotMock &) = delete;
+  AllowSnapshotMock &operator=(AllowSnapshotMock &&) = delete;
+  ~AllowSnapshotMock() override = default;
+
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] bool HandleHelp() const override;
+
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] bool HandlePath(const std::string &path) const override;
+};
+} // namespace hdfs::tools::test
+
+#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-tool-test.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-tool-test.cc
new file mode 100644
index 0000000..1b8cff1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-tool-test.cc
@@ -0,0 +1,35 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <gtest/gtest.h>
+
+#include "hdfs-allow-snapshot-mock.h"
+#include "hdfs-tool-test.h"
+
+HdfsToolBasicTest::~HdfsToolBasicTest() {}
+HdfsToolNegativeTest::~HdfsToolNegativeTest() {}
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsAllowSnapshot, HdfsToolBasicTest,
+    testing::Values(PassAPath<hdfs::tools::test::AllowSnapshotMock>,
+                    CallHelp<hdfs::tools::test::AllowSnapshotMock>));
+
+INSTANTIATE_TEST_SUITE_P(
+    HdfsAllowSnapshot, HdfsToolNegativeTest,
+    testing::Values(Pass2Paths<hdfs::tools::test::AllowSnapshotMock>));
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-tool-test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-tool-test.h
new file mode 100644
index 0000000..be695b6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/hdfs-tool-test.h
@@ -0,0 +1,112 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#ifndef LIBHDFSPP_TOOLS_HDFS_TOOL_TEST
+#define LIBHDFSPP_TOOLS_HDFS_TOOL_TEST
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <tuple>
+
+#include <gtest/gtest.h>
+
+#include "hdfs-tool.h"
+
+/**
+ * {@class HdfsToolBasicTest} is a fixture that houses basic tests on the
+ * {@class hdfs::tools::HdfsTool} interface. It contains the "happy path" tests
+ * which cover the scenarios where {@class hdfs::tools::HdfsTool} is expected
+ * to work just fine.
+ *
+ * {@class HdfsToolBasicTest} is parameterized on a lambda returning an instance
+ * of {@class hdfs::tools::HdfsTool} wrapped in a std::unique_ptr. We then run
+ * the tests on this instance. Each test runs in isolation, so a new instance
+ * is created for each test.
+ */
+class HdfsToolBasicTest
+    : public testing::TestWithParam<
+          std::function<std::unique_ptr<hdfs::tools::HdfsTool>()>> {
+public:
+  // Abiding by the rule of 5
+  HdfsToolBasicTest() = default;
+  HdfsToolBasicTest(const HdfsToolBasicTest &) = delete;
+  HdfsToolBasicTest(HdfsToolBasicTest &&) = delete;
+  HdfsToolBasicTest &operator=(const HdfsToolBasicTest &) = delete;
+  HdfsToolBasicTest &operator=(HdfsToolBasicTest &&) = delete;
+  ~HdfsToolBasicTest() override;
+
+protected:
+  void SetUp() override { hdfs_tool_ = GetParam()(); }
+
+  std::unique_ptr<hdfs::tools::HdfsTool> hdfs_tool_{nullptr};
+};
+
+/**
+ * {@class HdfsToolNegativeTest} is a fixture that houses negative tests on the
+ * {@class hdfs::tools::HdfsTool} interface. It covers the tests where
+ * unfavorable inputs are presented to the {@class hdfs::tools::HdfsTool}
+ * instance, which is expected to handle them gracefully without crashing.
+ */
+class HdfsToolNegativeTest : public HdfsToolBasicTest {
+public:
+  // Abiding by the rule of 5
+  HdfsToolNegativeTest() = default;
+  HdfsToolNegativeTest(const HdfsToolNegativeTest &) = delete;
+  HdfsToolNegativeTest(HdfsToolNegativeTest &&) = delete;
+  HdfsToolNegativeTest &operator=(const HdfsToolNegativeTest &) = delete;
+  HdfsToolNegativeTest &operator=(HdfsToolNegativeTest &&) = delete;
+  ~HdfsToolNegativeTest() override;
+};
+
+TEST_P(HdfsToolBasicTest, RunTool) { EXPECT_TRUE(this->hdfs_tool_->Do()); }
+
+TEST_P(HdfsToolNegativeTest, RunTool) {
+  EXPECT_ANY_THROW({ std::ignore = this->hdfs_tool_->Do(); });
+}
+
+template <class T> std::unique_ptr<hdfs::tools::HdfsTool> PassAPath() {
+  constexpr auto argc = 2;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("a/b/c");
+
+  static char *argv[] = {exe.data(), arg1.data()};
+  return std::make_unique<T>(argc, argv);
+}
+
+template <class T> std::unique_ptr<hdfs::tools::HdfsTool> CallHelp() {
+  constexpr auto argc = 2;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("-h");
+
+  static char *argv[] = {exe.data(), arg1.data()};
+  return std::make_unique<T>(argc, argv);
+}
+
+template <class T> std::unique_ptr<hdfs::tools::HdfsTool> Pass2Paths() {
+  constexpr auto argc = 3;
+  static std::string exe("hdfs_tool_name");
+  static std::string arg1("a/b/c");
+  static std::string arg2("d/e/f");
+
+  static char *argv[] = {exe.data(), arg1.data(), arg2.data()};
+  return std::make_unique<T>(argc, argv);
+}
+
+#endif
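
Since both fixtures are parameterized on factory lambdas, wiring another tool into these suites only takes a mock and one INSTANTIATE_TEST_SUITE_P call per fixture. A minimal sketch follows, assuming a hypothetical DisallowSnapshotMock that mirrors AllowSnapshotMock; neither the mock nor its header is part of this change.

// Illustrative sketch only -- DisallowSnapshotMock and its header are
// hypothetical and assumed to mirror AllowSnapshotMock.
#include <gtest/gtest.h>

#include "hdfs-disallow-snapshot-mock.h"
#include "hdfs-tool-test.h"

INSTANTIATE_TEST_SUITE_P(
    HdfsDisallowSnapshot, HdfsToolBasicTest,
    testing::Values(PassAPath<hdfs::tools::test::DisallowSnapshotMock>,
                    CallHelp<hdfs::tools::test::DisallowSnapshotMock>));

INSTANTIATE_TEST_SUITE_P(
    HdfsDisallowSnapshot, HdfsToolNegativeTest,
    testing::Values(Pass2Paths<hdfs::tools::test::DisallowSnapshotMock>));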
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/main.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/main.cc
new file mode 100644
index 0000000..7f9fefc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/tools/main.cc
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+int main(int argc, char *argv[]) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
index dbcb97f..9f4f984 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/CMakeLists.txt
@@ -16,6 +16,8 @@
 # limitations under the License.
 #
 
+find_package(Boost 1.72 COMPONENTS program_options REQUIRED)
+
 # Default LIBHDFSPP_DIR to the default install location.  You can override
 #    it by add -DLIBHDFSPP_DIR=... to your cmake invocation
 set(LIBHDFSPP_DIR CACHE STRING ${CMAKE_INSTALL_PREFIX})
@@ -26,6 +28,10 @@
 add_library(tools_common_obj OBJECT tools_common.cc)
 add_library(tools_common $<TARGET_OBJECTS:tools_common_obj>)
 
+add_library(hdfs_tool_obj OBJECT hdfs-tool.cc)
+target_include_directories(hdfs_tool_obj PRIVATE ../tools)
+
+
 add_executable(hdfs_cat hdfs_cat.cc)
 target_link_libraries(hdfs_cat tools_common hdfspp_static)
 
@@ -74,8 +80,14 @@
 add_executable(hdfs_setrep hdfs_setrep.cc)
 target_link_libraries(hdfs_setrep tools_common hdfspp_static)
 
-add_executable(hdfs_allowSnapshot hdfs_allowSnapshot.cc)
-target_link_libraries(hdfs_allowSnapshot tools_common hdfspp_static)
+add_library(hdfs_allowSnapshot_lib STATIC $<TARGET_OBJECTS:hdfs_tool_obj> hdfs-allow-snapshot/hdfs-allow-snapshot.cc)
+target_include_directories(hdfs_allowSnapshot_lib PRIVATE ../tools allow-snapshot ${Boost_INCLUDE_DIRS})
+target_link_libraries(hdfs_allowSnapshot_lib PRIVATE Boost::boost Boost::program_options tools_common hdfspp_static)
+
+add_executable(hdfs_allowSnapshot hdfs-allow-snapshot/main.cc)
+target_include_directories(hdfs_allowSnapshot PRIVATE ../tools)
+target_link_libraries(hdfs_allowSnapshot PRIVATE hdfs_allowSnapshot_lib)
+
 
 add_executable(hdfs_disallowSnapshot hdfs_disallowSnapshot.cc)
 target_link_libraries(hdfs_disallowSnapshot tools_common hdfspp_static)
@@ -90,4 +102,4 @@
 target_link_libraries(hdfs_deleteSnapshot tools_common hdfspp_static)
 
 add_executable(hdfs_tail hdfs_tail.cc)
-target_link_libraries(hdfs_tail tools_common hdfspp_static)
\ No newline at end of file
+target_link_libraries(hdfs_tail tools_common hdfspp_static)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/hdfs-allow-snapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/hdfs-allow-snapshot.cc
new file mode 100644
index 0000000..991c39b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/hdfs-allow-snapshot.cc
@@ -0,0 +1,120 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <iostream>
+#include <memory>
+#include <ostream>
+#include <sstream>
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+
+#include "hdfs-allow-snapshot.h"
+#include "tools_common.h"
+
+namespace hdfs::tools {
+AllowSnapshot::AllowSnapshot(const int argc, char **argv)
+    : HdfsTool(argc, argv) {}
+
+bool AllowSnapshot::Initialize() {
+  opt_desc_.add_options()("help,h", "Show the help for hdfs_allowSnapshot")(
+      "path", po::value<std::string>(),
+      "The path to the directory to make it snapshot-able");
+
+  // We allow only one argument to be passed to this tool. An exception is
+  // thrown if multiple arguments are passed.
+  pos_opt_desc_.add("path", 1);
+
+  po::store(po::command_line_parser(argc_, argv_)
+                .options(opt_desc_)
+                .positional(pos_opt_desc_)
+                .run(),
+            opt_val_);
+  po::notify(opt_val_);
+  return true;
+}
+
+std::string AllowSnapshot::GetDescription() const {
+  std::stringstream desc;
+  desc << "Usage: hdfs_allowSnapshot [OPTION] PATH" << std::endl
+       << std::endl
+       << "Allowing snapshots of a directory at PATH to be created."
+       << std::endl
+       << "If the operation completes successfully, the directory becomes "
+          "snapshottable."
+       << std::endl
+       << std::endl
+       << "  -h        display this help and exit" << std::endl
+       << std::endl
+       << "Examples:" << std::endl
+       << "hdfs_allowSnapshot hdfs://localhost.localdomain:8020/dir"
+       << std::endl
+       << "hdfs_allowSnapshot /dir1/dir2" << std::endl;
+  return desc.str();
+}
+
+bool AllowSnapshot::Do() {
+  if (!Initialize()) {
+    std::cerr << "Unable to initialize HDFS allow snapshot tool" << std::endl;
+    return false;
+  }
+
+  if (!ValidateConstraints()) {
+    std::cout << GetDescription();
+    return false;
+  }
+
+  if (opt_val_.count("help") > 0) {
+    return HandleHelp();
+  }
+
+  if (opt_val_.count("path") > 0) {
+    const auto path = opt_val_["path"].as<std::string>();
+    return HandlePath(path);
+  }
+
+  return true;
+}
+
+bool AllowSnapshot::HandleHelp() const {
+  std::cout << GetDescription();
+  return true;
+}
+
+bool AllowSnapshot::HandlePath(const std::string &path) const {
+  // Building a URI object from the given path
+  auto uri = hdfs::parse_path_or_exit(path);
+
+  const auto fs = hdfs::doConnect(uri, false);
+  if (fs == nullptr) {
+    std::cerr << "Could not connect to the file system. " << std::endl;
+    return false;
+  }
+
+  const auto status = fs->AllowSnapshot(uri.get_path());
+  if (!status.ok()) {
+    std::cerr << "Error: " << status.ToString() << std::endl;
+    return false;
+  }
+
+  // Clean up static data and prevent valgrind memory leaks
+  google::protobuf::ShutdownProtobufLibrary();
+  return true;
+}
+} // namespace hdfs::tools
\ No newline at end of file
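
The single positional "path" registered in Initialize is what enforces the one-argument constraint mentioned in the comment above: boost::program_options throws once a second positional value is supplied, which is the behaviour the Pass2Paths negative test relies on. Below is a standalone sketch of that behaviour, independent of this patch and using only plain boost::program_options calls.

// Standalone illustration of the positional-argument limit; not part of this
// patch.
#include <iostream>
#include <string>
#include <vector>

#include <boost/program_options.hpp>

namespace po = boost::program_options;

int main() {
  po::options_description desc;
  desc.add_options()("path", po::value<std::string>(), "target path");

  po::positional_options_description pos;
  pos.add("path", 1); // at most one positional value maps to "path"

  const std::vector<std::string> args{"a/b/c", "d/e/f"}; // two positionals
  po::variables_map vm;
  try {
    po::store(
        po::command_line_parser(args).options(desc).positional(pos).run(), vm);
    po::notify(vm);
  } catch (const po::error &e) {
    // Expected: boost reports that too many positional options were given.
    std::cerr << "Error: " << e.what() << std::endl;
    return 1;
  }
  return 0;
}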
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/hdfs-allow-snapshot.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/hdfs-allow-snapshot.h
new file mode 100644
index 0000000..fe515c3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/hdfs-allow-snapshot.h
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFSPP_TOOLS_HDFS_ALLOW_SNAPSHOT
+#define LIBHDFSPP_TOOLS_HDFS_ALLOW_SNAPSHOT
+
+#include <string>
+
+#include <boost/program_options.hpp>
+
+#include "hdfs-tool.h"
+
+namespace hdfs::tools {
+/**
+ * {@class AllowSnapshot} is an {@class HdfsTool} that enables snapshots of a
+ * directory at PATH to be created, thereby making the directory
+ * snapshot-able.
+ */
+class AllowSnapshot : public HdfsTool {
+public:
+  /**
+   * {@inheritdoc}
+   */
+  AllowSnapshot(int argc, char **argv);
+
+  // Abiding by the Rule of 5
+  AllowSnapshot(const AllowSnapshot &) = default;
+  AllowSnapshot(AllowSnapshot &&) = default;
+  AllowSnapshot &operator=(const AllowSnapshot &) = delete;
+  AllowSnapshot &operator=(AllowSnapshot &&) = delete;
+  ~AllowSnapshot() override = default;
+
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] std::string GetDescription() const override;
+
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] bool Do() override;
+
+protected:
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] bool Initialize() override;
+
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] bool ValidateConstraints() const override { return argc_ > 1; }
+
+  /**
+   * {@inheritdoc}
+   */
+  [[nodiscard]] bool HandleHelp() const override;
+
+  /**
+   * Handle the path argument that's passed to this tool.
+   *
+   * @param path The path to the directory that needs to be made snapshot-able.
+   *
+   * @return A boolean indicating the result of this operation.
+   */
+  [[nodiscard]] virtual bool HandlePath(const std::string &path) const;
+
+private:
+  /**
+   * A boost data-structure containing the description of the positional
+   * arguments passed on the command-line.
+   */
+  po::positional_options_description pos_opt_desc_;
+};
+} // namespace hdfs::tools
+#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/main.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/main.cc
new file mode 100644
index 0000000..e24fc7d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-allow-snapshot/main.cc
@@ -0,0 +1,41 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing,
+  software distributed under the License is distributed on an
+  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  KIND, either express or implied.  See the License for the
+  specific language governing permissions and limitations
+  under the License.
+*/
+
+#include <cstdlib>
+#include <exception>
+#include <iostream>
+
+#include "hdfs-allow-snapshot.h"
+
+int main(int argc, char *argv[]) {
+  hdfs::tools::AllowSnapshot allow_snapshot(argc, argv);
+  auto success = false;
+
+  try {
+    success = allow_snapshot.Do();
+  } catch (const std::exception &e) {
+    std::cerr << "Error: " << e.what() << std::endl;
+    success = false;
+  }
+
+  if (!success) {
+    exit(EXIT_FAILURE);
+  }
+  return 0;
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-tool.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-tool.cc
new file mode 100644
index 0000000..c4508d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-tool.cc
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs-tool.h"
+
+/**
+ * The virtual destructor is defined out-of-line so that it serves as the
+ * class's key function: the v-table is then emitted only in this translation
+ * unit instead of being duplicated wherever the header is included.
+ */
+hdfs::tools::HdfsTool::~HdfsTool() {}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-tool.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-tool.h
new file mode 100644
index 0000000..411c3c2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs-tool.h
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFSPP_TOOLS_HDFS_TOOL
+#define LIBHDFSPP_TOOLS_HDFS_TOOL
+
+#include <boost/program_options.hpp>
+
+namespace hdfs::tools {
+namespace po = boost::program_options;
+
+/**
+ * {@class HdfsTool} is the base class for HDFS utility tools.
+ * It serves as the interface through which command-line input flows
+ * to the corresponding HDFS API call.
+ */
+class HdfsTool {
+public:
+  /**
+   * @param argc Count of the command-line arguments.
+   * @param argv Pointer to an array of character pointers containing the
+   * command-line arguments.
+   */
+  HdfsTool(const int argc, char **argv) : argc_{argc}, argv_{argv} {}
+
+  // Abiding by the Rule of 5
+  HdfsTool(const HdfsTool &) = default;
+  HdfsTool(HdfsTool &&) = default;
+  HdfsTool &operator=(const HdfsTool &) = delete;
+  HdfsTool &operator=(HdfsTool &&) = delete;
+  virtual ~HdfsTool();
+
+  /**
+   * @return The description of this tool.
+   */
+  [[nodiscard]] virtual std::string GetDescription() const = 0;
+
+  /**
+   * Perform the core task of this tool.
+   *
+   * @return A boolean indicating the result of the task performed by this tool.
+   */
+  [[nodiscard]] virtual bool Do() = 0;
+
+protected:
+  /**
+   * Initialize the members. The Do method is expected to call the
+   * Initialize method before doing anything else. We do the
+   * initialization in a method (instead of the constructor) for better
+   * error handling. The command-line arguments are typically parsed here.
+   *
+   * @return A boolean indicating the result of the initialization.
+   */
+  [[nodiscard]] virtual bool Initialize() = 0;
+
+  /**
+   * Validates whether the tool has the necessary input data to perform its
+   * task.
+   *
+   * @return A boolean indicating the result of the validation.
+   *
+   */
+  [[nodiscard]] virtual bool ValidateConstraints() const = 0;
+
+  /**
+   * All derivatives of HdfsTool must implement a way to help the user,
+   * typically by displaying the relevant information about the tool.
+   *
+   * @return A boolean indicating the result of the help task.
+   */
+  [[nodiscard]] virtual bool HandleHelp() const = 0;
+
+  /**
+   * Count of the command-line arguments.
+   */
+  int argc_{0};
+
+  /**
+   * Pointer to an array of character pointers containing the command-line
+   * arguments.
+   */
+  char **argv_{nullptr};
+
+  /**
+   * A boost data-structure containing the mapping between each option and the
+   * value passed to it on the command-line.
+   */
+  po::variables_map opt_val_;
+
+  /**
+   * A boost data-structure containing the description of the options supported
+   * by this tool, as well as the description of the tool itself.
+   */
+  po::options_description opt_desc_;
+};
+} // namespace hdfs::tools
+
+#endif
\ No newline at end of file
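
The interface above implies a fixed control flow for every tool: Do drives Initialize, then ValidateConstraints, then either HandleHelp or the tool-specific handlers, exactly as AllowSnapshot does. A minimal sketch of a hypothetical derived tool is shown below; EchoPathTool is not part of this patch and only echoes its positional argument to illustrate that flow.

// Hypothetical derived tool, for illustration only.
#include <iostream>
#include <string>

#include "hdfs-tool.h"

namespace hdfs::tools {
class EchoPathTool : public HdfsTool {
public:
  EchoPathTool(const int argc, char **argv) : HdfsTool(argc, argv) {}
  ~EchoPathTool() override = default;

  [[nodiscard]] std::string GetDescription() const override {
    return "Usage: hdfs_echoPath PATH\n";
  }

  [[nodiscard]] bool Do() override {
    if (!Initialize()) {
      return false;
    }
    if (!ValidateConstraints()) {
      std::cout << GetDescription();
      return false;
    }
    if (opt_val_.count("help") > 0) {
      return HandleHelp();
    }
    if (opt_val_.count("path") > 0) {
      std::cout << opt_val_["path"].as<std::string>() << std::endl;
      return true;
    }
    return false;
  }

protected:
  [[nodiscard]] bool Initialize() override {
    opt_desc_.add_options()("help,h", "Show the help for hdfs_echoPath")(
        "path", po::value<std::string>(), "The path to echo");
    pos_opt_desc_.add("path", 1);
    po::store(po::command_line_parser(argc_, argv_)
                  .options(opt_desc_)
                  .positional(pos_opt_desc_)
                  .run(),
              opt_val_);
    po::notify(opt_val_);
    return true;
  }

  [[nodiscard]] bool ValidateConstraints() const override { return argc_ > 1; }

  [[nodiscard]] bool HandleHelp() const override {
    std::cout << GetDescription();
    return true;
  }

private:
  po::positional_options_description pos_opt_desc_;
};
} // namespace hdfs::tools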
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc
deleted file mode 100644
index 0293ee2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tools/hdfs_allowSnapshot.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing,
-  software distributed under the License is distributed on an
-  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  KIND, either express or implied.  See the License for the
-  specific language governing permissions and limitations
-  under the License.
-*/
-
-#include <google/protobuf/stubs/common.h>
-#include <unistd.h>
-#include "tools_common.h"
-
-void usage(){
-  std::cout << "Usage: hdfs_allowSnapshot [OPTION] PATH"
-      << std::endl
-      << std::endl << "Allowing snapshots of a directory at PATH to be created."
-      << std::endl << "If the operation completes successfully, the directory becomes snapshottable."
-      << std::endl
-      << std::endl << "  -h        display this help and exit"
-      << std::endl
-      << std::endl << "Examples:"
-      << std::endl << "hdfs_allowSnapshot hdfs://localhost.localdomain:8020/dir"
-      << std::endl << "hdfs_allowSnapshot /dir1/dir2"
-      << std::endl;
-}
-
-int main(int argc, char *argv[]) {
-  //We should have at least 2 arguments
-  if (argc < 2) {
-    usage();
-    exit(EXIT_FAILURE);
-  }
-
-  int input;
-
-  //Using GetOpt to read in the values
-  opterr = 0;
-  while ((input = getopt(argc, argv, "h")) != -1) {
-    switch (input)
-    {
-    case 'h':
-      usage();
-      exit(EXIT_SUCCESS);
-    case '?':
-      if (isprint(optopt))
-        std::cerr << "Unknown option `-" << (char) optopt << "'." << std::endl;
-      else
-        std::cerr << "Unknown option character `" << (char) optopt << "'." << std::endl;
-      usage();
-      exit(EXIT_FAILURE);
-    default:
-      exit(EXIT_FAILURE);
-    }
-  }
-  std::string uri_path = argv[optind];
-
-  //Building a URI object from the given uri_path
-  hdfs::URI uri = hdfs::parse_path_or_exit(uri_path);
-
-  std::shared_ptr<hdfs::FileSystem> fs = hdfs::doConnect(uri, false);
-  if (!fs) {
-    std::cerr << "Could not connect the file system. " << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  hdfs::Status status = fs->AllowSnapshot(uri.get_path());
-  if (!status.ok()) {
-    std::cerr << "Error: " << status.ToString() << std::endl;
-    exit(EXIT_FAILURE);
-  }
-
-  // Clean up static data and prevent valgrind memory leaks
-  google::protobuf::ShutdownProtobufLibrary();
-  return 0;
-}