AMBARI-861. Support for Hadoop 2.0 includes puppet script changes.
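
This change makes the HMC frontend, orchestrator, and puppet manifest
generator branch on the cluster's stored Hadoop stack version instead of
assuming Hadoop 1, and wires the Hadoop 2 components to the new hdp2-*
puppet modules. As an illustrative sketch only (not part of the patch), the
pattern repeated throughout the PHP changes looks roughly like the
following; the AMBARI_HADOOP_* constants and
HMCDBAccessor::getHadoopStackVersion() are introduced by this patch, while
the helper name below is hypothetical:

    <?php
    // Constants defined in hmc/php/conf/Config.inc by this patch.
    define("AMBARI_HADOOP_1", "1.0");
    define("AMBARI_HADOOP_2", "2.0");

    // Hypothetical helper mirroring the branching added in
    // hmc/php/frontend/deploy.php: pick the Ganglia server component name
    // for the given stack version ("2"-suffixed components map to the
    // hdp2-* puppet classes).
    function gangliaServerComponentFor($hadoopVersion) {
      if ($hadoopVersion == AMBARI_HADOOP_1) {
        return "GANGLIA_MONITOR_SERVER";
      } else if ($hadoopVersion == AMBARI_HADOOP_2) {
        return "GANGLIA2_MONITOR_SERVER";
      }
      return "";
    }

    // Example: prints GANGLIA2_MONITOR_SERVER for a Hadoop 2 cluster.
    echo gangliaServerComponentFor(AMBARI_HADOOP_2) . "\n";
    ?>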

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/branch-0.9-h2-dev@1398196 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/hmc/js/manageServices.js b/hmc/js/manageServices.js
index a62834f..12b7f86 100644
--- a/hmc/js/manageServices.js
+++ b/hmc/js/manageServices.js
@@ -518,6 +518,6 @@
     }
 
-    var notReconfigurable = [ 'PIG', 'SQOOP', 'OOZIE', 'TEMPLETON', 'GANGLIA', 'HIVE' ];
+    var notReconfigurable = [ 'PIG', 'PIG2', 'SQOOP', 'OOZIE', 'TEMPLETON', 'GANGLIA', 'HIVE', 'HIVE2' ];
     var reconfigureClass;
     if (globalYui.Array.indexOf(notReconfigurable, serviceName) >= 0) {
       reconfigureClass = 'serviceManagementEntryActionReconfigure disabled';
diff --git a/hmc/js/selectComponents.js b/hmc/js/selectComponents.js
index 1410def..b8c4ece 100644
--- a/hmc/js/selectComponents.js
+++ b/hmc/js/selectComponents.js
@@ -67,7 +67,8 @@
           }
           if (!component[componentIndex]['isMaster'] && 
               !component[componentIndex]['isClient'] &&
-              component[componentIndex].componentName != 'HIVE_MYSQL') {
+              component[componentIndex].componentName != 'HIVE_MYSQL' && 
+              component[componentIndex].componentName != 'HIVE2_MYSQL') {
             divContent += '<li class="selectServicesEntry" name=try>';
             divContent +=   '<label class="checkbox" for="install' + serviceName + 'Id">' 
                         +   '<input type="checkbox" disabled="disabled" checked="yes" name="' + serviceName + '" id="install' + serviceName + 'Id" value="' + component[componentIndex].componentName + '"/>'
diff --git a/hmc/package/rpm/create_hmc_rpm.sh b/hmc/package/rpm/create_hmc_rpm.sh
index 7b81833..115692b 100755
--- a/hmc/package/rpm/create_hmc_rpm.sh
+++ b/hmc/package/rpm/create_hmc_rpm.sh
@@ -27,16 +27,16 @@
 fi
 
 if [[ -z "${VERSION}" ]]; then
-  VERSION="0.9.0"
+  VERSION="2.0.0.1"
 fi
 
 if [[ -z "${RELEASE}" ]]; then
-  RELEASE="0"
+  RELEASE="1"
 fi
 
 rm -rf ${BUILD_DIR}/*
 
-PKG_NAME="ambari"
+PKG_NAME="hmc"
 
 HMC_DIR="${BUILD_DIR}/${PKG_NAME}-$VERSION/"
 
diff --git a/hmc/package/rpm/hmc-nodep.spec b/hmc/package/rpm/hmc-nodep.spec
index 7ca3d4f..389230e 100644
--- a/hmc/package/rpm/hmc-nodep.spec
+++ b/hmc/package/rpm/hmc-nodep.spec
@@ -24,7 +24,7 @@
 
 Summary: HMC without PHP/Ruby dependencies
 Name: hmc-nodep
-Version: 0.9.0
+Version: 1.0.0
 URL: http://hortonworks.com
 Release: 2%{?dist}
 License: Apache License, Version 2.0
diff --git a/hmc/package/rpm/hmc-php53.spec b/hmc/package/rpm/hmc-php53.spec
index f3c0521..ef87c6b 100644
--- a/hmc/package/rpm/hmc-php53.spec
+++ b/hmc/package/rpm/hmc-php53.spec
@@ -24,11 +24,11 @@
 
 Summary: HMC for PHP 5.3
 Name: hmc-php53
-Version: 0.9.0
-URL: http://incubator.apache.org/projects/ambari.html
+Version: 1.0.0
+URL: http://hortonworks.com
 Release: 2%{?dist}
 License: Apache License, Version 2.0
-Vendor: Apache Software Foundation <ambari-dev@incubator.apache.org>
+Vendor: Hortonworks <hmc-dev-group@hortonworks.com>
 Group: System Environment/Base
 Source: hmc-%{version}.tar.gz
 Source1: hmc.init.in
@@ -148,5 +148,5 @@
 #%clean
 #%__rm -rf $RPM_BUILD_ROOT
 %changelog
-* Thu Jun 07 2012 Ambari <ambari-dev@incubator.apache.org>
+* Wed Apr 04 2012 Hortonworks <ambari-group@hortonworks.com>
 - Initial version
diff --git a/hmc/php/conf/Config.inc b/hmc/php/conf/Config.inc
index d415f9b..2433ea4 100644
--- a/hmc/php/conf/Config.inc
+++ b/hmc/php/conf/Config.inc
@@ -27,7 +27,7 @@
 $GLOBALS["BACKGROUND_EXECUTOR_PATH"] = $INSTALL_ROOT . "/php/util/BackgroundExecutor.php";
 $GLOBALS["PHP_EXEC_PATH"] = "/usr/bin/php";
 $GLOBALS["CLUSTERMAIN_PATH"] = $INSTALL_ROOT . "/php/orchestrator/ClusterMain.php";
-$GLOBALS["DB_PATH"] = "/var/db/hmc/data/data.db";
+$GLOBALS["DB_PATH"] = "/var/db/hmc/data/hdp2.db";
 
 $GLOBALS["puppetMaxParallelKicks"] = 10;
 $GLOBALS["puppetMasterModulesDirectory"] = "/etc/puppet/master/modules";
@@ -66,4 +66,6 @@
 $GLOBALS["DRYRUN"] = TRUE;
 */
 
+define("AMBARI_HADOOP_1", "1.0");
+define("AMBARI_HADOOP_2", "2.0");
 ?>
diff --git a/hmc/php/db/HMCDBAccessor.php b/hmc/php/db/HMCDBAccessor.php
index d81803d..36b1dff 100644
--- a/hmc/php/db/HMCDBAccessor.php
+++ b/hmc/php/db/HMCDBAccessor.php
@@ -100,6 +100,41 @@
   }
 
   /**
+   * Get the Hadoop stack version for a given clusterName
+   * @param string $clusterName Cluster Name
+   * @return mixed
+   *   array ( "version" => $version,
+   *           "result" => 0,
+   *           "error" => "");
+   */
+  public function getHadoopStackVersion($clusterName) {
+    LockAcquire();
+    $query = "SELECT version FROM Clusters WHERE cluster_name = "
+        . $this->dbHandle->quote($clusterName);
+    $response = array ( "result" => 0, "error" => "");
+    $this->logger->log_trace("Running query: $query");
+    $pdoStmt = $this->dbHandle->query($query);
+    if ($pdoStmt === FALSE) {
+      $error = $this->getLastDBErrorAsString();
+      $this->logger->log_error("Error when executing query"
+          . ", query=".$query
+          . ", error=".$error);
+      $response["result"] = 1;
+      $response["error"] = $error;
+      LockRelease(); return $response;
+    }
+    $result = $pdoStmt->fetchAll(PDO::FETCH_BOTH);
+    if (isset($result) && is_array($result) && count($result) == 1) {
+      $response["version"] = $result[0]["version"];
+      LockRelease(); 
+      return $response;
+    }
+
+    LockRelease(); 
+    return $response;
+  }
+
+  /**
    * Update cluster state for a given clusterName
    * @param string $clusterName Cluster Name
    * @param string $state New state of cluster
diff --git a/hmc/php/db/OrchestratorDB.php b/hmc/php/db/OrchestratorDB.php
index 004faa2..c01146e 100644
--- a/hmc/php/db/OrchestratorDB.php
+++ b/hmc/php/db/OrchestratorDB.php
@@ -125,7 +125,7 @@
         $this->logger->log_error("Found service with invalid state"
             . ", service=" . $svc["serviceName"]
             . ", state=" . $svc["state"]);
-        $state = STATE::UNKNOWN; // unknown
+        $state = 0; // unknown
       }
 
       $svcObj =
@@ -155,12 +155,6 @@
     }
 
     $state = STATE::getStateFromString($serviceInfo["state"]);
-    if ($state === FALSE) {
-      $this->logger->log_error("Found service with invalid state"
-          . ", service=" . $serviceName
-          . ", state=" . $serviceInfo["state"]);
-      $state = STATE::UNKNOWN; // unknown
-    }
     return $this->getServiceObj($serviceName, $state, $this, $this->puppet);
   }
 
@@ -206,7 +200,7 @@
         $this->logger->log_error("Found service with invalid state"
             . ", service=" . $svc["serviceName"]
             . ", state=" . $svc["state"]);
-        $state = STATE::UNKNOWN; // unknown
+        $state = 0; // unknown
       }
 
       $svcObj =
@@ -249,7 +243,7 @@
         $this->logger->log_warn("Found service with invalid state"
             . ", service=" . $svc["serviceName"]
             . ", state=" . $svc["state"]);
-        $state = STATE::UNKNOWN; // unknown
+        $state = 0; // unknown
       }
 
       $svcObj =
@@ -296,7 +290,7 @@
         $this->logger->log_warn("Found component with invalid state"
             . ", component=" . $comp["componentName"]
             . ", state=" . $comp["state"]);
-        $state = STATE::UNKNOWN;
+        $state = 0;
       }
       $isClient = FALSE;
       if (isset($fullCompList[$comp["componentName"]])) {
@@ -317,13 +311,13 @@
   }
 
   public function  getNagiosServerComponent() {
-    $svc =  $this->getService("NAGIOS");
+    $svc =  $this->getService("NAGIOS2");
     if ($svc === FALSE) {
       return $svc;
     }
-    $compObjs = $this->getServiceComponents("NAGIOS");
+    $compObjs = $this->getServiceComponents("NAGIOS2");
     foreach ($compObjs as $compObj) {
-      if ($compObj->name == "NAGIOS_SERVER") {
+      if ($compObj->name == "NAGIOS_SERVER2") {
         return $compObj;
       }
     }
@@ -398,7 +392,7 @@
         $this->logger->log_error("Found component with invalid state"
             . ", component=" . $comp["componentName"]
             . ", state=" . $comp["state"]);
-        $state = STATE::UNKNOWN;
+        $state = 0;
       }
       $isClient = FALSE;
       if (isset($fullCompList[$comp["componentName"]])) {
@@ -470,7 +464,7 @@
         $this->logger->log_error("Found component with invalid state"
         . ", component=" . $comp["componentName"]
         . ", state=" . $comp["state"]);
-        $state = STATE::UNKNOWN;
+        $state = 0;
       }
       $isClient = FALSE;
       if (isset($fullCompList[$comp["componentName"]])) {
diff --git a/hmc/php/frontend/assignMasters.php b/hmc/php/frontend/assignMasters.php
index 3d8775d..9f9e8dc 100644
--- a/hmc/php/frontend/assignMasters.php
+++ b/hmc/php/frontend/assignMasters.php
@@ -104,11 +104,11 @@
   hack till here */
 
 // choose the name node as the canary host.
-$nameNodeHost = $componentsToHosts["NAMENODE"];
+$nameNodeHost = $componentsToHosts["NAMENODE2"];
 $AllMountPoints = array();
 $nameNodeInfoResult = $dbAccessor->getHostInfo($clusterName, $nameNodeHost[0]);
 if ($nameNodeInfoResult["result"] != 0 ) {
-  $logger->log_error("Got error while getting canary host info ".$nameNodeInfoResult["error"]);
+  $logger->log_error("Got error while getting canary host info for NAMENODE2 ".$nameNodeInfoResult["error"]);
   print json_encode($nameNodeInfoResult);
   return;
 }
@@ -144,6 +144,35 @@
                                                                     "suffix" => "hadoop/hdfs/namesecondary"
                                                                     )
                                             ),
+                                "HDFS2" => array(
+                                             "dfs_name_dir" => array(
+                                                                    "displayName" => "NameNode directories",
+                                                                    "maxDirectoriesNeeded" => -1,
+                                                                    "suffix" => "hadoop/hdfs/namenode"
+                                                                    ),
+                                             "dfs_data_dir" => array(
+                                                                    "displayName" => "DataNode directories",
+                                                                    "maxDirectoriesNeeded" => -1,
+                                                                    "suffix" => "hadoop/hdfs/data"
+                                                                    ),
+                                             "fs_checkpoint_dir" => array(
+                                                                    "displayName" => "SecondaryNameNode Checkpoint directory",
+                                                                    "maxDirectoriesNeeded" => 1,
+                                                                    "suffix" => "hadoop/hdfs/namesecondary"
+                                                                    )
+                                            ),
+                                "YARN" => array(
+                                             "yarn_nm_local_dirs" => array(
+                                                                    "displayName" => "YARN Application Container Directories",
+                                                                    "maxDirectoriesNeeded" => -1,
+                                                                    "suffix" => "hadoop/yarn"
+                                                                    ),
+                                             "yarn_nm_log_dirs" => array(
+                                                                    "displayName" => "YARN Application Container Log Directories",
+                                                                    "maxDirectoriesNeeded" => -1,
+                                                                    "suffix" => "hadoop/yarn"
+                                                                    ),
+                                            ),
                                 "MAPREDUCE" => array(
                                              "mapred_local_dir" => array(
                                                                     "displayName" => "MapReduce Data Directories",
@@ -158,6 +187,13 @@
                                                                     "suffix" => "hadoop/oozie"
                                                                    ),
                                             ),
+                                "OOZIE2" => array(
+                                             "oozie_data_dir" => array(
+                                                                    "displayName" => "Oozie DB Directory",
+                                                                    "maxDirectoriesNeeded" => 1,
+                                                                    "suffix" => "hadoop/oozie"
+                                                                   ),
+                                            ),
                                 "ZOOKEEPER" => array(
                                              "zk_data_dir" => array(
                                                                     "displayName" => "ZooKeeper Data Directory",
@@ -165,6 +201,13 @@
                                                                     "suffix" => "hadoop/zookeeper"
                                                                   ),
                                            ),
+                                "ZOOKEEPER2" => array(
+                                             "zk_data_dir" => array(
+                                                                    "displayName" => "ZooKeeper Data Directory",
+                                                                    "maxDirectoriesNeeded" => 1,
+                                                                    "suffix" => "hadoop/zookeeper"
+                                                                  ),
+                                           ),
                                 ),
             );
 
diff --git a/hmc/php/frontend/configUtils.php b/hmc/php/frontend/configUtils.php
index 23ad7f2..7eb68cd 100644
--- a/hmc/php/frontend/configUtils.php
+++ b/hmc/php/frontend/configUtils.php
@@ -413,6 +413,7 @@
       scheduler_name|org.apache.hadoop.mapred.CapacityTaskScheduler
       nagios_web_login|nagiosadmin
       nagios_web_password|admin
+      nagios_contact|nagiosadmin@hortonworks.com
       hadoop_heapsize|1024
       namenode_heapsize|1024
       namenode_opt_newsize|200
@@ -456,24 +457,51 @@
 function handleHiveMysql($clusterName, $dbAccessor, $logHandle) {
   $services = $dbAccessor->getAllServicesInfo($clusterName);
   $configs = $dbAccessor->getServiceConfig($clusterName);
-  $hostForMysql = $dbAccessor->getHostsForComponent($clusterName, "HIVE_MYSQL");
-  if ( ($services["services"]["HIVE"]["isEnabled"] == 1) &&
-       ( isset($configs["properties"])
-         && ( !isset($configs["properties"]["hive_mysql_host"])
-              || empty($configs["properties"]["hive_mysql_host"])) ) &&
-       (empty($hostForMysql["hosts"])) ) {
-    $logHandle->log_debug("Hive is enabled but mysql server is not set, set it up on hive server itself");
-    $hostComponents = $dbAccessor->getHostsForComponent($clusterName, "HIVE_SERVER");
-    $hiveServerHosts = array_keys($hostComponents["hosts"]);
-    $newConfig = array ( "hive_mysql_host" => "localhost" );  
-    $dbAccessor->updateServiceConfigs($clusterName, $newConfig);
-    $dbAccessor->addHostsToComponent($clusterName, "HIVE_MYSQL",
-          $hiveServerHosts, "ASSIGNED", "");
-  } else {
-    if (isset($configs["properties"])
-        && isset($configs["properties"]["hive_mysql_host"])
-        && $configs["properties"]["hive_mysql_host"] != "localhost") {
-      $dbAccessor->removeAllHostsFromComponent($clusterName, "HIVE_MYSQL");
+
+  $stackVersion = $dbAccessor->getHadoopStackVersion($clusterName);
+  $hadoopVersion = $stackVersion['version'];
+
+  if ($hadoopVersion == AMBARI_HADOOP_1) {
+    $hostForMysql = $dbAccessor->getHostsForComponent($clusterName, "HIVE_MYSQL");
+    if ( ($services["services"]["HIVE"]["isEnabled"] == 1) &&
+         ( isset($configs["properties"])
+           && ( !isset($configs["properties"]["hive_mysql_host"])
+                || empty($configs["properties"]["hive_mysql_host"])) ) &&
+         (empty($hostForMysql["hosts"])) ) {
+      $logHandle->log_debug("Hive is enabled but mysql server is not set, set it up on hive server itself");
+      $hostComponents = $dbAccessor->getHostsForComponent($clusterName, "HIVE_SERVER");
+      $hiveServerHosts = array_keys($hostComponents["hosts"]);
+      $newConfig = array ( "hive_mysql_host" => "localhost" );  
+      $dbAccessor->updateServiceConfigs($clusterName, $newConfig);
+      $dbAccessor->addHostsToComponent($clusterName, "HIVE_MYSQL",
+            $hiveServerHosts, "ASSIGNED", "");
+    } else {
+      if (isset($configs["properties"])
+          && isset($configs["properties"]["hive_mysql_host"])
+          && $configs["properties"]["hive_mysql_host"] != "localhost") {
+        $dbAccessor->removeAllHostsFromComponent($clusterName, "HIVE_MYSQL");
+      }
+    }
+  } else if ($hadoopVersion == AMBARI_HADOOP_2) {
+    $hostForMysql = $dbAccessor->getHostsForComponent($clusterName, "HIVE2_MYSQL");
+    if ( ($services["services"]["HIVE2"]["isEnabled"] == 1) &&
+         ( isset($configs["properties"])
+           && ( !isset($configs["properties"]["hive_mysql_host"])
+                || empty($configs["properties"]["hive_mysql_host"])) ) &&
+         (empty($hostForMysql["hosts"])) ) {
+      $logHandle->log_debug("Hive is enabled but mysql server is not set, set it up on hive server itself");
+      $hostComponents = $dbAccessor->getHostsForComponent($clusterName, "HIVE2_SERVER");
+      $hiveServerHosts = array_keys($hostComponents["hosts"]);
+      $newConfig = array ( "hive_mysql_host" => "localhost" );  
+      $dbAccessor->updateServiceConfigs($clusterName, $newConfig);
+      $dbAccessor->addHostsToComponent($clusterName, "HIVE2_MYSQL",
+            $hiveServerHosts, "ASSIGNED", "");
+    } else {
+      if (isset($configs["properties"])
+          && isset($configs["properties"]["hive_mysql_host"])
+          && $configs["properties"]["hive_mysql_host"] != "localhost") {
+        $dbAccessor->removeAllHostsFromComponent($clusterName, "HIVE2_MYSQL");
+      }
     }
   }
 }
diff --git a/hmc/php/frontend/createCluster.php b/hmc/php/frontend/createCluster.php
index 8e0a5b2..f170281 100644
--- a/hmc/php/frontend/createCluster.php
+++ b/hmc/php/frontend/createCluster.php
@@ -84,7 +84,7 @@
 $dbAccessor->wipeOutClusters($clusterName);
 $logger->log_info("Completed deletion of cluster: ".$clusterName);
 
-$hdpVersion="1.0"; // TODO: hardcoded
+$hdpVersion="2.0"; // TODO: hardcoded
 $state="Configuration in progress";
 
 $response = $dbAccessor->createCluster($clusterName, $hdpVersion, $state);
diff --git a/hmc/php/frontend/deploy.php b/hmc/php/frontend/deploy.php
index c503df9..27d93aa 100644
--- a/hmc/php/frontend/deploy.php
+++ b/hmc/php/frontend/deploy.php
@@ -106,7 +106,7 @@
 // check if nagios hosted on same server
 if (!$nagiosGangliaCoHosted) {
   // check if component mapped to this host
-  $hostMap = $dbAccessor->getHostsForComponent($clusterName, "NAGIOS_SERVER");
+  $hostMap = $dbAccessor->getHostsForComponent($clusterName, "NAGIOS_SERVER2");
   if (isset($hostMap["hosts"][$thisHostName])) {
     $nagiosGangliaCoHosted = TRUE;
   }
@@ -115,8 +115,16 @@
 // if still nothing then check if ganglia server installed on same server
 if (!$nagiosGangliaCoHosted) {
   // check if component mapped to this host
+  $stackVersion = $dbAccessor->getHadoopStackVersion($clusterName);
+  $hadoopVersion = $stackVersion['version'];
+  $gmServerName = "";
+  if($hadoopVersion == AMBARI_HADOOP_1) {
+    $gmServerName = "GANGLIA_MONITOR_SERVER";
+  } else if ($hadoopVersion == AMBARI_HADOOP_2) {
+    $gmServerName = "GANGLIA2_MONITOR_SERVER";
+  }
   $hostMap = $dbAccessor->getHostsForComponent($clusterName,
-      "GANGLIA_MONITOR_SERVER");
+      $gmServerName);
   if (isset($hostMap["hosts"][$thisHostName])) {
     $nagiosGangliaCoHosted = TRUE;
   }
diff --git a/hmc/php/orchestrator/Cluster.php b/hmc/php/orchestrator/Cluster.php
index 26cc977..6c7f7e7 100644
--- a/hmc/php/orchestrator/Cluster.php
+++ b/hmc/php/orchestrator/Cluster.php
@@ -345,7 +345,7 @@
       $result = $nagiosComp->stop($transaction->createSubTransaction(), $dryRun);
       if ($result["result"] != 0) {
         $this->setState(State::FAILED, $transaction, $dryRun);
-        $nagiosService = $this->db->getService("NAGIOS");
+        $nagiosService = $this->db->getService("NAGIOS2");
         if ($nagiosService !== FALSE) {
           $nagiosService->setState(STATE::FAILED,
               $transaction->createSubTransaction(), $dryRun, FALSE);
diff --git a/hmc/php/puppet/DBReader.php b/hmc/php/puppet/DBReader.php
index 386c2af..ace9261 100644
--- a/hmc/php/puppet/DBReader.php
+++ b/hmc/php/puppet/DBReader.php
@@ -33,6 +33,14 @@
     $this->logger = new HMCLogger("DBReader");
   }
 
+  public function getHadoopStackVersion($clusterName, $dbAccessor = NULL) {
+    if ($dbAccessor == NULL) {
+      $dbAccessor = new HMCDBAccessor($this->dbPath);
+    }
+    $ret = $dbAccessor->getHadoopStackVersion($clusterName);
+    return $ret["version"];
+  }
+
   public function getAllConfigs($clusterId) {
     $dbAccessor = new HMCDBAccessor($this->dbPath);
     $allConfigs = $dbAccessor->getServiceConfig($clusterId);
@@ -60,18 +68,20 @@
   public function getHostNames($clusterId) {
     $dbAccessor = new HMCDBAccessor($this->dbPath);
     $allHostComponents = $dbAccessor->getAllHostsByComponent($clusterId);
+    $hadoopVersion = $this->getHadoopStackVersion($clusterId, $dbAccessor);
+    $hostKeyMap = ($hadoopVersion == AMBARI_HADOOP_2) ? $this->hadoop2_hostKeyMap : $this->hadoop1_hostKeyMap;
     $hostNames = array();
     foreach ($allHostComponents["components"] as $componentName => $componentInfo) {
       if (!isset($componentInfo["hosts"])) {
         continue;
       }
       foreach($componentInfo["hosts"] as $hostname => $hostInfo) {
-        if (!isset($this->hostKeyMap[$componentName])) {
+        if (!isset($hostKeyMap[$componentName])) {
           $this->logger->log_trace("No puppet handling needed for  component "
               . $componentName);
           continue;
         }
-        $puppetHostKey = $this->hostKeyMap[$componentName];
+        $puppetHostKey = $hostKeyMap[$componentName];
         if (!isset($hostNames[$puppetHostKey])) {
           $hostNames[$puppetHostKey] = array();
         }
@@ -88,6 +98,8 @@
     $dbAccessor = new HMCDBAccessor($this->dbPath);
     $allHostComponents = $dbAccessor->getAllHostsByComponent($clusterId);
     $allHostRoleConfigs = $dbAccessor->getHostRoleConfigs($clusterId);
+    $hadoopVersion = $this->getHadoopStackVersion($clusterId, $dbAccessor);
+    $componentToPuppetClassMap = ($hadoopVersion == AMBARI_HADOOP_2) ? $this->hadoop2_componentToPuppetClassMap : $this->hadoop1_componentToPuppetClassMap;
     $hostRoles = array();
     foreach ($allHostComponents["components"] as $componentName => $componentInfo) {
       if (!isset($componentInfo["hosts"])) {
@@ -100,12 +112,12 @@
         if (!isset($hostRoles[$hostname])) {
           $hostRoles[$hostname] = array();
         }
-        if (!isset($this->componentToPuppetClassMap[$componentName])) {
+        if (!isset($componentToPuppetClassMap[$componentName])) {
           $this->logger->log_trace("No puppet handling needed for component "
               . $componentName);
           continue;
         }
-        $puppetClass = $this->componentToPuppetClassMap[$componentName];
+        $puppetClass = $componentToPuppetClassMap[$componentName];
         $hostRoles[$hostname][$puppetClass] = array();
         if (in_array($componentName, $components)) {
             $hostRoles[$hostname][$puppetClass]["service_state"] =
@@ -126,7 +138,7 @@
     return $hostRoles;
   }
 
-  private $componentToPuppetClassMap = array (
+  private $hadoop1_componentToPuppetClassMap = array (
       "NAMENODE" => "hdp-hadoop::namenode",
       "DATANODE"=> "hdp-hadoop::datanode",
       "SNAMENODE" => "hdp-hadoop::snamenode",
@@ -134,7 +146,7 @@
       "TASKTRACKER" => "hdp-hadoop::tasktracker",
       "HDFS_CLIENT" => "hdp-hadoop::client",
       "MAPREDUCE_CLIENT" => "hdp-hadoop::client",
-      "ZOOKEEPER_SERVER" => "hdp-zookeeper",
+  		"ZOOKEEPER_SERVER" => "hdp-zookeeper",
       "ZOOKEEPER_CLIENT" => "hdp-zookeeper::client",
       "HBASE_MASTER" => "hdp-hbase::master",
       "HBASE_REGIONSERVER" => "hdp-hbase::regionserver",
@@ -157,13 +169,45 @@
       "HTTPD" => "hdp-monitor-webserver"
     );
 
+  private $hadoop2_componentToPuppetClassMap = array (
+      "NAMENODE2" => "hdp2-hadoop::namenode",
+      "DATANODE2"=> "hdp2-hadoop::datanode",
+      "SNAMENODE2" => "hdp2-hadoop::snamenode",
+      "RESOURCEMANAGER" => "hdp2-hadoop::resourcemanager",
+      "NODEMANAGER" => "hdp2-hadoop::nodemanager",
+      "HDFS2_CLIENT" => "hdp2-hadoop::client",
+      "YARN_CLIENT" => "hdp2-hadoop::client",
+      "MAPREDUCE2_CLIENT" => "hdp2-hadoop::client",
+      "ZOOKEEPER2_SERVER" => "hdp2-zookeeper",
+      "ZOOKEEPER2_CLIENT" => "hdp2-zookeeper::client",
+      "HBASE2_MASTER" => "hdp2-hbase::master",
+      "HBASE2_REGIONSERVER" => "hdp2-hbase::regionserver",
+      "HBASE2_CLIENT" => "hdp2-hbase::client",
+      "PIG2_CLIENT" => "hdp2-pig",
+      "SQOOP_CLIENT" => "hdp-sqoop",
+      "OOZIE2_SERVER" => "hdp2-oozie::server",
+      "OOZIE2_CLIENT" => "hdp2-oozie::client",
+      "HIVE2_CLIENT" => "hdp2-hive::client",
+      "HCATALOG2_CLIENT" => "hdp2-hcat",
+      "HCATALOG2_SERVER" => "hdp2-hcat::server",
+      "HIVE2_SERVER" => "hdp2-hive::server",
+      "HIVE2_MYSQL" => "hdp2-mysql::server",
+      "TEMPLETON_SERVER" => "hdp-templeton::server",
+      "TEMPLETON_CLIENT" => "hdp-templeton::client",
+      "DASHBOARD" => "hdp-dashboard",
+      "NAGIOS_SERVER2" => "hdp2-nagios::server",
+      "GANGLIA2_MONITOR_SERVER" => "hdp2-ganglia::server",
+      "GANGLIA2_MONITOR" => "hdp2-ganglia::monitor",
+      "HTTPD" => "hdp-monitor-webserver"
+    );
+
   //Store the database key for each role
-  private $hostKeyMap =
+  private $hadoop1_hostKeyMap =
     array(
         "NAMENODE" => "namenode_host",
         "JOBTRACKER" => "jtnode_host",
         "SNAMENODE" => "snamenode_host",
-        "ZOOKEEPER_SERVER" => "zookeeper_hosts",
+    		"ZOOKEEPER_SERVER" => "zookeeper_hosts",
         "HBASE_MASTER" => "hbase_master_host",
         "HCATALOG_SERVER" => "hcat_server_host",
         "HIVE_SERVER" => "hive_server_host",
@@ -177,6 +221,25 @@
         "HBASE_REGIONSERVER" => "hbase_rs_hosts"
         );
 
+  private $hadoop2_hostKeyMap =
+    array(
+        "NAMENODE2" => "namenode_host",
+        "RESOURCEMANAGER" => "yarn_rm_host",
+        "SNAMENODE2" => "snamenode_host",
+        "ZOOKEEPER2_SERVER" => "zookeeper_hosts",
+        "HBASE2_MASTER" => "hbase_master_host",
+        "HCATALOG2_SERVER" => "hcat_server_host",
+        "HIVE2_SERVER" => "hive_server_host",
+        "OOZIE2_SERVER" => "oozie_server",
+        "TEMPLETON_SERVER" => "templeton_server_host",
+        "DASHBOARD" => "dashboard_host",
+        "NAGIOS_SERVER2" => "nagios_server_host",
+        "GANGLIA2_MONITOR_SERVER" => "ganglia_server_host",
+        "DATANODE2" => "slave_hosts",
+        "NODEMANAGER" => "slave_hosts",
+        "HBASE2_REGIONSERVER" => "hbase_rs_hosts"
+        );
+
   /****** Not used but may be used later *****
   //Store the database key for each configuration
   private $configKeyMap =
diff --git a/hmc/php/puppet/PuppetInvoker.php b/hmc/php/puppet/PuppetInvoker.php
index 86c8d38..716828a 100644
--- a/hmc/php/puppet/PuppetInvoker.php
+++ b/hmc/php/puppet/PuppetInvoker.php
@@ -183,7 +183,8 @@
       $txnId = $txnObj->toString();
 
       $hostRolesStates = array();
-      $roleDependencies = new RoleDependencies();
+      $dbReader = new DBReader($this->db, $clusterName);
+      $roleDependencies = new RoleDependencies($dbReader->getHadoopStackVersion($clusterName));
       $nodeList = array();
       foreach($serviceCheckNodes as $service => $node) {
         $rs = $roleDependencies->getServiceCheckRole($service);
@@ -199,7 +200,6 @@
         $hostRolesStates[$node][$rs] = array();
       }
       $nodesToKick = array_unique($nodeList);
-      $dbReader = new DBReader($this->db);
       $hostInfo = $dbReader->getHostNames($clusterName);
       $configInfo = $dbReader->getAllConfigs($clusterName);
       $hostAttributes = $dbReader->getAllHostAttributes($clusterName);
@@ -258,8 +258,9 @@
       copy($GLOBALS["manifestloaderDir"] . "/site.pp", $GLOBALS["manifestloaderDestinationDir"] . "/site.pp");
 
       //Generate manifest
+       $dbReader = new DBReader($this->db);
       $agentModulesDir = $GLOBALS["puppetAgentModulesDirectory"];
-      ManifestGenerator::generateManifest($manifestDir, $hostInfo,
+      ManifestGenerator::generateManifest($dbReader, $clusterId, $manifestDir, $hostInfo,
           $configInfo, $hostRolesStates, $hostAttributes, $agentModulesDir);
 
       //Write version file
@@ -267,6 +268,11 @@
 
       if ($dryRun) {
         $successfullNodes = $nodes;
+        
+        //Archive
+        $sitePPFile = $manifestDir . "/site.pp";
+        system("mv " . $sitePPFile . " " . $GLOBALS["manifestloaderDir"] . "/site.pp-" . $txnId);
+        
         return $this->createGenKickWaitResponse($kickFailedNodes, $failureResponseNodes,
            $timedoutNodes, $successfullNodes, $nodes);
       }
@@ -333,7 +339,7 @@
       while (true) {
         $this->checkForReportFiles($nodes, $txnId, $doneNodes);
         $this->logger->log_info(count($doneNodes) . " out of " . count($nodes) 
-            . " nodes have reported");
+            . " nodes have reported for txn " . $txnId);
         if (count($doneNodes) >= count($nodes)) {
           ##All nodes kicked have reported back
           break;
diff --git a/hmc/php/puppet/genmanifest/PuppetClassDependencies.php b/hmc/php/puppet/genmanifest/PuppetClassDependencies.php
index 4e1cef4..40bdf08 100644
--- a/hmc/php/puppet/genmanifest/PuppetClassDependencies.php
+++ b/hmc/php/puppet/genmanifest/PuppetClassDependencies.php
@@ -47,12 +47,15 @@
 
     //HDFS
     $this->addDependency("hdp-hadoop::namenode", SERVICE_STATE_RUNNING, "hdp-hadoop::namenode::service_check", array());
-
+    $this->addDependency("hdp2-hadoop::namenode", SERVICE_STATE_RUNNING, "hdp2-hadoop::namenode::service_check", array());

+    
     //MapReduce
     $this->addDependency("hdp-hadoop::jobtracker", SERVICE_STATE_RUNNING, "hdp-hadoop::jobtracker::service_check", array());
+    $this->addDependency("hdp2-hadoop::resourcemanager", SERVICE_STATE_RUNNING, "hdp2-hadoop::resourcemanager::service_check", array());
 
     //ZooKeeper
     $this->addDependency("hdp-zookeeper", SERVICE_STATE_RUNNING, "hdp-zookeeper::zookeeper::service_check", array());
+    $this->addDependency("hdp2-zookeeper", SERVICE_STATE_RUNNING, "hdp2-zookeeper::zookeeper::service_check", array());
 
     //HBase
     $this->addDependency("hdp-hbase::master", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-zookeeper::client",
@@ -60,31 +63,61 @@
     $this->addDependency("hdp-hbase::master", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
 
+    $this->addDependency("hdp2-hbase::master", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-zookeeper::client",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+    $this->addDependency("hdp2-hbase::master", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+
     $this->addDependency("hdp-hbase::master", SERVICE_STATE_RUNNING, "hdp-zookeeper::client",
         array("service_state" => SERVICE_STATE_NO_OP));
     $this->addDependency("hdp-hbase::master", SERVICE_STATE_RUNNING, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_NO_OP));
 
+    $this->addDependency("hdp2-hbase::master", SERVICE_STATE_RUNNING, "hdp2-zookeeper::client",
+        array("service_state" => SERVICE_STATE_NO_OP));
+    $this->addDependency("hdp2-hbase::master", SERVICE_STATE_RUNNING, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_NO_OP));
+
     $this->addDependency("hdp-hbase::master", SERVICE_STATE_UNINSTALLED, "hdp-zookeeper::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
     $this->addDependency("hdp-hbase::master", SERVICE_STATE_UNINSTALLED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
 
+    $this->addDependency("hdp2-hbase::master", SERVICE_STATE_UNINSTALLED, "hdp2-zookeeper::client",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+    $this->addDependency("hdp2-hbase::master", SERVICE_STATE_UNINSTALLED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+
     $this->addDependency("hdp-hbase::regionserver", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-zookeeper::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
     $this->addDependency("hdp-hbase::regionserver", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
 
+    $this->addDependency("hdp2-hbase::regionserver", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-zookeeper::client",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+    $this->addDependency("hdp2-hbase::regionserver", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+
     $this->addDependency("hdp-hbase::regionserver", SERVICE_STATE_RUNNING, "hdp-zookeeper::client",
         array("service_state" => SERVICE_STATE_NO_OP));
     $this->addDependency("hdp-hbase::regionserver", SERVICE_STATE_RUNNING, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_NO_OP));
 
+    $this->addDependency("hdp2-hbase::regionserver", SERVICE_STATE_RUNNING, "hdp2-zookeeper::client",
+        array("service_state" => SERVICE_STATE_NO_OP));
+    $this->addDependency("hdp2-hbase::regionserver", SERVICE_STATE_RUNNING, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_NO_OP));
+
     $this->addDependency("hdp-hbase::regionserver", SERVICE_STATE_UNINSTALLED, "hdp-zookeeper::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
     $this->addDependency("hdp-hbase::regionserver", SERVICE_STATE_UNINSTALLED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
 
+    $this->addDependency("hdp2-hbase::regionserver", SERVICE_STATE_UNINSTALLED, "hdp2-zookeeper::client",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+    $this->addDependency("hdp2-hbase::regionserver", SERVICE_STATE_UNINSTALLED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+
     //Oozie
     $this->addDependency("hdp-oozie::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
@@ -93,6 +126,13 @@
     $this->addDependency("hdp-oozie::server", SERVICE_STATE_UNINSTALLED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
 
+    $this->addDependency("hdp2-oozie::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+    $this->addDependency("hdp2-oozie::server", SERVICE_STATE_RUNNING, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_NO_OP));
+    $this->addDependency("hdp2-oozie::server", SERVICE_STATE_UNINSTALLED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+
     //Hive depends on Hcat
     $this->addDependency("hdp-hive::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hcat",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
@@ -106,10 +146,25 @@
     $this->addDependency("hdp-hive::client", SERVICE_STATE_UNINSTALLED, "hdp-hcat",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
 
+    $this->addDependency("hdp2-hive::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hcat",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+    $this->addDependency("hdp2-hive::server", SERVICE_STATE_RUNNING, "hdp2-hcat",
+        array("service_state" => SERVICE_STATE_NO_OP));
+    $this->addDependency("hdp2-hive::server", SERVICE_STATE_UNINSTALLED, "hdp2-hcat",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+
+    $this->addDependency("hdp2-hive::client", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hcat",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+    $this->addDependency("hdp2-hive::client", SERVICE_STATE_UNINSTALLED, "hdp2-hcat",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+
     //Hive Service Check
     $this->addDependency("hdp-hive::hive::service_check", SERVICE_STATE_NOT_APPLICABLE, 
         "hdp-hcat::hcat::service_check", array());
 
+    $this->addDependency("hdp2-hive::hive::service_check", SERVICE_STATE_NOT_APPLICABLE, 
+        "hdp2-hcat::hcat::service_check", array());
+
     //Pig
     $this->addDependency("hdp-pig", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
@@ -117,6 +172,12 @@
     $this->addDependency("hdp-pig", SERVICE_STATE_UNINSTALLED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
 
+    $this->addDependency("hdp2-pig", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
+
+    $this->addDependency("hdp2-pig", SERVICE_STATE_UNINSTALLED, "hdp2-hadoop::client",
+        array("service_state" => SERVICE_STATE_UNINSTALLED));
+
     //Sqoop
     $this->addDependency("hdp-sqoop", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hadoop::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
@@ -157,31 +218,38 @@
     $this->addDependency("hdp-ganglia::server", SERVICE_STATE_RUNNING, "hdp-ganglia::hdp-gmetad::service_check", array());
     $this->addDependency("hdp-ganglia::server", SERVICE_STATE_RUNNING, "hdp-monitor-webserver", array());
 
+    $this->addDependency("hdp2-ganglia::monitor", SERVICE_STATE_RUNNING, "hdp2-ganglia::hdp-gmond::service_check", array());
+    $this->addDependency("hdp2-ganglia::server", SERVICE_STATE_RUNNING, "hdp2-ganglia::hdp-gmetad::service_check", array());
+    $this->addDependency("hdp2-ganglia::server", SERVICE_STATE_RUNNING, "hdp-monitor-webserver", array());
+
     //Nagios
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-oozie::client",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-oozie::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hcat",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hcat",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp-hive::client",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_INSTALLED_AND_CONFIGURED, "hdp2-hive::client",
         array("service_state" => SERVICE_STATE_INSTALLED_AND_CONFIGURED));
 
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_RUNNING, "hdp-oozie::client",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_RUNNING, "hdp2-oozie::client",
         array("service_state" => SERVICE_STATE_NO_OP));
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_RUNNING, "hdp-hcat",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_RUNNING, "hdp2-hcat",
         array("service_state" => SERVICE_STATE_NO_OP));
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_RUNNING, "hdp-hive::client",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_RUNNING, "hdp2-hive::client",
         array("service_state" => SERVICE_STATE_NO_OP));
 
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_UNINSTALLED, "hdp-oozie::client",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_UNINSTALLED, "hdp2-oozie::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_UNINSTALLED, "hdp-hcat",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_UNINSTALLED, "hdp2-hcat",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
-    $this->addDependency("hdp-nagios::server", SERVICE_STATE_UNINSTALLED, "hdp-hive::client",
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_UNINSTALLED, "hdp2-hive::client",
         array("service_state" => SERVICE_STATE_UNINSTALLED));
 
     $this->addDependency("hdp-nagios::server", SERVICE_STATE_RUNNING, "hdp-nagios::nagios::service_check", array());
     $this->addDependency("hdp-nagios::server", SERVICE_STATE_RUNNING, "hdp-monitor-webserver", array());
 
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_RUNNING, "hdp2-nagios::nagios::service_check", array());
+    $this->addDependency("hdp2-nagios::server", SERVICE_STATE_RUNNING, "hdp-monitor-webserver", array());
+
     //Dashboard
     $this->addDependency("hdp-dashboard", SERVICE_STATE_RUNNING, "hdp-dashboard::dashboard::service_check", array());
   }
@@ -205,7 +273,7 @@
   }
 
   public function getHostLevelDependencies($role, $serviceState) {
-    $dependencies = array("hdp" => array() );
+    $dependencies = array("hdp2" => array() );
     if (!isset($this->hostLevelDependencies[$role][$serviceState])) {
       return $dependencies;
     }
diff --git a/hmc/php/puppet/genmanifest/RoleDependencies.php b/hmc/php/puppet/genmanifest/RoleDependencies.php
index 1c196dc..0ddcaf2 100644
--- a/hmc/php/puppet/genmanifest/RoleDependencies.php
+++ b/hmc/php/puppet/genmanifest/RoleDependencies.php
@@ -31,68 +31,133 @@
     private $classDependencies;
     private $logger;
 
-    function __construct() {
+    private static $hadoop1_sortedRoles = array (
+      "hdp",
+      "hdp-hadoop::namenode",
+      "hdp-hadoop::namenode::service_check",
+      "hdp-hadoop::snamenode",
+      "hdp-hadoop::datanode",
+      "hdp-hadoop::client",
+      "hdp-hadoop::hdfs::service_check",
+      "hdp-hadoop::jobtracker",
+      "hdp-hadoop::jobtracker::service_check",
+      "hdp-hadoop::tasktracker",
+      "hdp-hadoop::mapred::service_check",
+      "hdp-zookeeper",
+      "hdp-zookeeper::client",
+      "hdp-zookeeper::zookeeper::service_check",
+      "hdp-zookeeper::quorum::service_check",
+      "hdp-hbase::master",
+      "hdp-hbase::regionserver",
+      "hdp-hbase::client",
+      "hdp-hbase::hbase::service_check",
+      "hdp-mysql::server",
+      "hdp-hive::server",
+      "hdp-hive::client",
+      "hdp-hive::hive::service_check",
+      "hdp-hcat",
+      "hdp-hcat::hcat::service_check",
+      "hdp-oozie::server",
+      "hdp-oozie::client",
+      "hdp-oozie::oozie::service_check",
+      "hdp-pig",
+      "hdp-pig::pig::service_check",
+      "hdp-sqoop",
+      "hdp-sqoop::sqoop::service_check",
+      "hdp-templeton::server",
+      "hdp-templeton::client",
+      "hdp-templeton::templeton::service_check",
+      "hdp-dashboard",
+      "hdp-dashboard::dashboard::service_check",
+      "hdp-nagios::server",
+      "hdp-nagios::nagios::service_check",
+      "hdp-ganglia::server",
+      "hdp-ganglia::monitor",
+      "hdp-ganglia::hdp-gmond::service_check",
+      "hdp-ganglia::hdp-gmetad::service_check",
+      "hdp-monitor-webserver",
+    );
+
+    private static $hadoop2_sortedRoles = array (
+      "hdp2",
+      "hdp2-hadoop::namenode",
+      "hdp2-hadoop::namenode::service_check",
+      "hdp2-hadoop::snamenode",
+      "hdp2-hadoop::datanode",
+      "hdp2-hadoop::client",
+      "hdp2-hadoop::hdfs::service_check",
+      "hdp2-hadoop::resourcemanager",
+      "hdp2-hadoop::resourcemanager::service_check",
+      "hdp2-hadoop::nodemanager",
+      "hdp2-hadoop::mapred::service_check",
+      "hdp2-zookeeper",
+      "hdp2-zookeeper::client",
+      "hdp2-zookeeper::zookeeper::service_check",
+      "hdp2-zookeeper::quorum::service_check",
+      "hdp2-hbase::master",
+      "hdp2-hbase::regionserver",
+      "hdp2-hbase::client",
+      "hdp2-hbase::hbase::service_check",
+      "hdp2-mysql::server",
+      "hdp2-hive::server",
+      "hdp2-hive::client",
+      "hdp2-hive::hive::service_check",
+      "hdp2-hcat",
+      "hdp2-hcat::hcat::service_check",
+      "hdp2-oozie::server",
+      "hdp2-oozie::client",
+      "hdp2-oozie::oozie::service_check",
+      "hdp2-pig",
+      "hdp2-pig::pig::service_check",
+      "hdp-sqoop",
+      "hdp-sqoop::sqoop::service_check",
+      "hdp-templeton::server",
+      "hdp-templeton::client",
+      "hdp-templeton::templeton::service_check",
+      "hdp-dashboard",
+      "hdp-dashboard::dashboard::service_check",
+      "hdp2-nagios::server",
+      "hdp2-nagios::nagios::service_check",
+      "hdp2-ganglia::server",
+      "hdp2-ganglia::monitor",
+      "hdp2-ganglia::hdp-gmond::service_check",
+      "hdp2-ganglia::hdp-gmetad::service_check",
+      "hdp-monitor-webserver",
+    );
+
+    private static $hadoop1_serviceCheckRoles = array (
+      "HDFS" => "hdp-hadoop::hdfs::service_check",
+      "MAPREDUCE" => "hdp-hadoop::mapred::service_check",
+      "ZOOKEEPER" => "hdp-zookeeper::quorum::service_check",
+      "HBASE" => "hdp-hbase::hbase::service_check",
+      "PIG" => "hdp-pig::pig::service_check",
+      "SQOOP" => "hdp-sqoop::sqoop::service_check",
+      "HCATALOG" => "hdp-hcat::hcat::service_check",
+      "HIVE" => "hdp-hive::hive::service_check",
+      "OOZIE" => "hdp-oozie::oozie::service_check",
+      "TEMPLETON" => "hdp-templeton::templeton::service_check",
+    );
+
+    private static $hadoop2_serviceCheckRoles = array (
+      "HDFS2" => "hdp2-hadoop::hdfs::service_check",
+      "YARN" => "hdp2-hadoop::mapred::service_check",
+      "MAPREDUCE2" => "hdp2-hadoop::mapred::service_check",
+      "ZOOKEEPER2" => "hdp2-zookeeper::quorum::service_check",
+      "HBASE2" => "hdp2-hbase::hbase::service_check",
+      "PIG2" => "hdp2-pig::pig::service_check",
+      "SQOOP" => "hdp-sqoop::sqoop::service_check",
+      "HCATALOG2" => "hdp2-hcat::hcat::service_check",
+      "HIVE2" => "hdp2-hive::hive::service_check",
+      "OOZIE2" => "hdp2-oozie::oozie::service_check",
+      "TEMPLETON" => "hdp-templeton::templeton::service_check",
+    );
+
+    function __construct($hadoopStackVersion = AMBARI_HADOOP_1) {
        $this->logger = new HMCLogger("RoleDependencies");
        $this->classDependencies = new PuppetClassDependencies();
-	     $this->sortedRoles = array (
-	      "hdp",
-	      "hdp-hadoop::namenode",
-	      "hdp-hadoop::namenode::service_check",
-	      "hdp-hadoop::snamenode",
-	      "hdp-hadoop::datanode",
-	      "hdp-hadoop::client",
-	      "hdp-hadoop::hdfs::service_check",
-	      "hdp-hadoop::jobtracker",
-	      "hdp-hadoop::jobtracker::service_check",
-	      "hdp-hadoop::tasktracker",
-	      "hdp-hadoop::mapred::service_check",
-	      "hdp-zookeeper",
-	      "hdp-zookeeper::client",
-	      "hdp-zookeeper::zookeeper::service_check",
-	      "hdp-zookeeper::quorum::service_check",
-	      "hdp-hbase::master",
-	      "hdp-hbase::regionserver",
-	      "hdp-hbase::client",
-	      "hdp-hbase::hbase::service_check",
-	      "hdp-mysql::server",
-        "hdp-hive::server",
-        "hdp-hive::client",
-        "hdp-hive::hive::service_check",
-        "hdp-hcat",
-        "hdp-hcat::hcat::service_check",
-	      "hdp-oozie::server",
-	      "hdp-oozie::client",
-	      "hdp-oozie::oozie::service_check",
-	      "hdp-pig",
-	      "hdp-pig::pig::service_check",
-	      "hdp-sqoop",
-	      "hdp-sqoop::sqoop::service_check",
-	      "hdp-templeton::server",
-	      "hdp-templeton::client",
-	      "hdp-templeton::templeton::service_check",
-        "hdp-dashboard",
-        "hdp-dashboard::dashboard::service_check",
-        "hdp-nagios::server",
-        "hdp-nagios::nagios::service_check",
-        "hdp-ganglia::server",
-        "hdp-ganglia::monitor",
-        "hdp-ganglia::hdp-gmond::service_check",
-        "hdp-ganglia::hdp-gmetad::service_check",
-        "hdp-monitor-webserver",
-	    );
-
-      $this->serviceCheckRoles = array (
-        "HDFS" => "hdp-hadoop::hdfs::service_check",
-        "MAPREDUCE" => "hdp-hadoop::mapred::service_check",
-        "ZOOKEEPER" => "hdp-zookeeper::quorum::service_check",
-        "HBASE" => "hdp-hbase::hbase::service_check",
-        "PIG" => "hdp-pig::pig::service_check",
-        "SQOOP" => "hdp-sqoop::sqoop::service_check",
-        "HCATALOG" => "hdp-hcat::hcat::service_check",
-        "HIVE" => "hdp-hive::hive::service_check",
-        "OOZIE" => "hdp-oozie::oozie::service_check",
-        "TEMPLETON" => "hdp-templeton::templeton::service_check",
-      );
+       $this->sortedRoles = ($hadoopStackVersion == AMBARI_HADOOP_2) ? self::$hadoop2_sortedRoles : self::$hadoop1_sortedRoles; 
+       $this->serviceCheckRoles = ($hadoopStackVersion == AMBARI_HADOOP_2) ? self::$hadoop2_serviceCheckRoles : self::$hadoop1_serviceCheckRoles; 
+       $this->logger->log_debug("hadoopStackVersion = $hadoopStackVersion");
     }
 
     public function getRolesStages($rolesList) {
diff --git a/hmc/php/puppet/genmanifest/generateManifest.php b/hmc/php/puppet/genmanifest/generateManifest.php
index ffdd125..a829c35 100644
--- a/hmc/php/puppet/genmanifest/generateManifest.php
+++ b/hmc/php/puppet/genmanifest/generateManifest.php
@@ -26,7 +26,7 @@
 include "RoleDependencies.php";
 
  class ManifestGenerator {
-   public static function generateManifest($dir, $hostInfo,
+   public static function generateManifest($dbReader, $clusterId, $dir, $hostInfo,
        $configInfo, $hostRoles, $hostAttributes, $modulesDir) {
      $sitePPFile = $dir . "/site.pp";
      //Delete file if exists
@@ -35,7 +35,12 @@
      }
      $fh = fopen($dir . "/site.pp", "w");
 
-     $importString = ManifestGenerator::getAllImports($modulesDir);
+     $hadoopVersion = $dbReader->getHadoopStackVersion($clusterId);
+     if ($hadoopVersion == AMBARI_HADOOP_1) {
+       $importString = ManifestGenerator::getAllImportsV1($modulesDir);
+     } else {
+       $importString = ManifestGenerator::getAllImportsV2($modulesDir);
+     }
      fwrite($fh, $importString);
 
      $hostsConfig = HostsConfigManifest::getHostsConfigManifest($hostInfo,
@@ -47,7 +52,7 @@
          $hostAttributes);
      fwrite($fh, $hostsAttributeManifest . "\n");
 
-     $roleDepObj = new RoleDependencies();
+     $roleDepObj = new RoleDependencies($hadoopVersion);
      foreach ($hostRoles as $n => $rolesStates) {
        $nm = new NodeManifest($n);
        $roleList = array_keys($rolesStates);
@@ -80,7 +85,7 @@
            }
          }
        }
-       ManifestGenerator::optimizePackageInstall($n, $nm, $rolesStates, $configInfo, $roleStages);
+       //ManifestGenerator::optimizePackageInstall($n, $nm, $rolesStates, $configInfo, $roleStages);
        $nodeManifestString = $nm->generateNodeManifest();
        fwrite($fh, $nodeManifestString . "\n");
      }
@@ -167,7 +172,8 @@
        }
        //Add in the order of the stages
        $stateList = $rolesStatesDs[$r];
-       if ($stateList[SERVICE_STATE_KEY] != $serviceState) {
+       if (!isset($stateList[SERVICE_STATE_KEY])
+           || $stateList[SERVICE_STATE_KEY] != $serviceState) {
          continue;
        }
        if (isset(self::$rolesToPackageMap[$r])) {
@@ -216,7 +222,7 @@
      $manifestObj->setRoleState("hdp", "pre_installed_pkgs", $pList);
    }
 
-   private static function getAllImports($modulesDir) {
+   private static function getAllImportsV1($modulesDir) {
      $importString = "";
      $importString = $importString . "import \"" . $modulesDir . "/hdp/manifests/*.pp" ."\"\n";
      $importString = $importString . "import \"" . $modulesDir . "/hdp-hadoop/manifests/*.pp" ."\"\n";
@@ -233,6 +239,24 @@
      return $importString;
    }
 
+   private static function getAllImportsV2($modulesDir) {
+     $importString = "";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-hadoop/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-hbase/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-zookeeper/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-oozie/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-pig/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp-sqoop/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp-templeton/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-hive/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-hcat/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp2-mysql/manifests/*.pp" ."\"\n";
+     $importString = $importString . "import \"" . $modulesDir . "/hdp-monitor-webserver/manifests/*.pp" ."\"\n";
+     return $importString;
+   }
+
    private static $rolesToPackageMap = array (
        "hdp-hadoop::namenode" => array ("hadoop", "hadoop-libhdfs.x86_64", "hadoop-native.x86_64", "hadoop-pipes.x86_64",
             "hadoop-sbin.x86_64", "hadoop-lzo"),
@@ -261,7 +285,7 @@
        "hdp-oozie::server" => array("oozie.noarch", "extjs-2.2-1"),
        "hdp-oozie::client" => array("oozie-client.noarch"),
        "hdp-mysql::server" => array("mysql-server"),
-       "hdp-templeton::server" => array("templeton", "templeton-tar-pig-0.0.1.14-1", "templeton-tar-hive-0.0.1.14-1"),
+       "hdp-templeton::server" => array("templeton", "templeton-tar-pig-0.0.1.15-1", "templeton-tar-hive-0.0.1.15-1"),
        "hdp-templeton::client" => array("templeton"),
        "lzo" => array("lzo", "lzo", "lzo-devel", "lzo-devel"),
        "snappy" => array("snappy", "snappy-devel"),
diff --git a/hmc/php/util/selectNodes.php b/hmc/php/util/selectNodes.php
index bff01da..3bf41ac 100644
--- a/hmc/php/util/selectNodes.php
+++ b/hmc/php/util/selectNodes.php
@@ -49,30 +49,35 @@
   /**
    * Helper function to add HDFS NameNode
    */
-  function addNameNode($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("HDFS", $serviceInfo)) {
-
+  function addNameNode($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("HDFS", $serviceInfo)) {
       $result["mastersToHosts"]["NAMENODE"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists("HDFS2", $serviceInfo)) {
+      $result["mastersToHosts"]["NAMENODE2"] = $this->createHostMap($hostInfo);
     }
     return $result;
   }
 
   /**
-   * Helper function to add SNameNode
+   * Helper function to add HDFS SNameNode
    */
-  function addSNameNode($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("HDFS", $serviceInfo)) {
+  function addSNameNode($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("HDFS", $serviceInfo)) {
       $result["mastersToHosts"]["SNAMENODE"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists("HDFS2", $serviceInfo)) {
+      $result["mastersToHosts"]["SNAMENODE2"] = $this->createHostMap($hostInfo);
     }
     return $result;
   }
 
   /**
-   * Helper function to add JobTracker.
+   * Helper function to add the JobTracker (HDP 1) or ResourceManager (HDP 2).
    */
-  function addJobTracker($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("MAPREDUCE", $serviceInfo)) {
+  function addResourceManager($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("MAPREDUCE", $serviceInfo)) {
       $result["mastersToHosts"]["JOBTRACKER"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists("YARN", $serviceInfo)) {
+      $result["mastersToHosts"]["RESOURCEMANAGER"] = $this->createHostMap($hostInfo);
     }
     return $result;
   }
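
The master-placement helpers above now share one dispatch shape: check the stack version, then key off the HDP-1 or HDP-2 service and component names. A minimal sketch of that shared pattern as a method on the same class, assuming AMBARI_HADOOP_1 and AMBARI_HADOOP_2 are the constants used throughout this file (the generic addMaster helper is illustrative, not part of the patch):

    // Illustrative generic form of the addXxx($hadoopVersion, ...) helpers.
    function addMaster($hadoopVersion, $serviceInfo, $result, $hostInfo,
                       $v1Service, $v1Component, $v2Service, $v2Component) {
      if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists($v1Service, $serviceInfo)) {
        $result["mastersToHosts"][$v1Component] = $this->createHostMap($hostInfo);
      } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists($v2Service, $serviceInfo)) {
        $result["mastersToHosts"][$v2Component] = $this->createHostMap($hostInfo);
      }
      return $result;
    }
    // e.g. addNameNode() would reduce to:
    //   return $this->addMaster($hadoopVersion, $serviceInfo, $result, $hostInfo,
    //                           "HDFS", "NAMENODE", "HDFS2", "NAMENODE2");
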
@@ -80,9 +85,11 @@
   /**
    * Helper function to add HBase Master.
    */
-  function addHBaseMaster($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("HBASE", $serviceInfo)) {
+  function addHBaseMaster($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("HBASE", $serviceInfo)) {
       $result["mastersToHosts"]["HBASE_MASTER"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists("HBASE2", $serviceInfo)) {
+      $result["mastersToHosts"]["HBASE2_MASTER"] = $this->createHostMap($hostInfo);
     }
     return $result;
   }
@@ -90,9 +97,11 @@
   /**
    * Helper function to add Oozie server.
    */
-  function addOozieServer($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("OOZIE", $serviceInfo)) {
+  function addOozieServer($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("OOZIE", $serviceInfo)) {
       $result["mastersToHosts"]["OOZIE_SERVER"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists("OOZIE2", $serviceInfo)) {
+      $result["mastersToHosts"]["OOZIE2_SERVER"] = $this->createHostMap($hostInfo);
     }
     return $result;
   }
@@ -100,9 +109,11 @@
   /**
    * Helper function to add Hive Server.
    */
-  function addHiveServer($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("HIVE", $serviceInfo)) {
+  function addHiveServer($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("HIVE", $serviceInfo)) {
       $result["mastersToHosts"]["HIVE_SERVER"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && array_key_exists("HIVE2", $serviceInfo)) {
+      $result["mastersToHosts"]["HIVE2_SERVER"] = $this->createHostMap($hostInfo);
     }
     return $result;
   }
@@ -110,7 +121,7 @@
   /**
    * Helper function to add Templeton.
    */
-  function addTempletonServer($serviceInfo, $result, $hostInfo) {
+  function addTempletonServer($hadoopVersion, $serviceInfo, $result, $hostInfo) {
     if (array_key_exists("TEMPLETON", $serviceInfo)) {
       $result["mastersToHosts"]["TEMPLETON_SERVER"] = $this->createHostMap($hostInfo);
     }
@@ -120,14 +131,22 @@
   /**
    * Helper function to add ZooKeeper master server.
    */
-  function addZooKeeperServer($serviceInfo, $result, $hostInfo) {
-    if (array_key_exists("ZOOKEEPER", $serviceInfo)) { 
+  function addZooKeeperServer($hadoopVersion, $serviceInfo, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1 && array_key_exists("ZOOKEEPER", $serviceInfo)) { 
       if (array_key_exists("ZOOKEEPER_SERVER", $result["mastersToHosts"])) {
         array_push($result["mastersToHosts"]["ZOOKEEPER_SERVER"]["hostNames"],
             $hostInfo["hostName"]);
       } else {
         $result["mastersToHosts"]["ZOOKEEPER_SERVER"] = $this->createHostMap($hostInfo);
       }
+    } else if ($hadoopVersion == AMBARI_HADOOP_2 && 
+               array_key_exists("ZOOKEEPER2", $serviceInfo)) { 
+      if (array_key_exists("ZOOKEEPER2_SERVER", $result["mastersToHosts"])) {
+        array_push($result["mastersToHosts"]["ZOOKEEPER2_SERVER"]["hostNames"],
+            $hostInfo["hostName"]);
+      } else {
+        $result["mastersToHosts"]["ZOOKEEPER2_SERVER"] = $this->createHostMap($hostInfo);
+      }
     }
     return $result;
   }
@@ -135,31 +154,52 @@
   /**
    * Helper function to add Ganglia master server.
    */
-  function addGangliaServer($result, $hostInfo) {
-    $result["mastersToHosts"]["GANGLIA_MONITOR_SERVER"] = $this->createHostMap($hostInfo);
+  function addGangliaServer($hadoopVersion, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1) {
+      $result["mastersToHosts"]["GANGLIA_MONITOR_SERVER"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2) {
+      $result["mastersToHosts"]["GANGLIA2_MONITOR_SERVER"] = $this->createHostMap($hostInfo);
+    }
     return $result;
   }
 
   /**
    * Helper function to add Nagios server.
    */
-  function addNagiosServer($result, $hostInfo) {
-    $result["mastersToHosts"]["NAGIOS_SERVER"] = $this->createHostMap($hostInfo);
+  function addNagiosServer($hadoopVersion, $result, $hostInfo) {
+    if ($hadoopVersion == AMBARI_HADOOP_1) {
+      $result["mastersToHosts"]["NAGIOS_SERVER"] = $this->createHostMap($hostInfo);
+    } else if ($hadoopVersion == AMBARI_HADOOP_2) {
+      $result["mastersToHosts"]["NAGIOS_SERVER2"] = $this->createHostMap($hostInfo);
+    }
     return $result;
   }
 
   /**
   * Adds all the slaves to the hostlist given what's enabled
    */
-  function addSlaves($db, $hostlist, $clusterName, $services, $gangliaMaster) {
-    $db->addHostsToComponent($clusterName, "TASKTRACKER", $hostlist, "ASSIGNED", "");
-    $db->addHostsToComponent($clusterName, "DATANODE", $hostlist, "ASSIGNED", "");
-    if (array_key_exists("HBASE", $services)) {
-      $db->addHostsToComponent($clusterName, "HBASE_REGIONSERVER", $hostlist, "ASSIGNED", "");
-    }
-    if (array_key_exists("GANGLIA", $services)) {
-      if (sizeof($hostlist) > 0) {
-        $db->addHostsToComponent($clusterName, "GANGLIA_MONITOR", $hostlist, "ASSIGNED", "");
+  function addSlaves($hadoopVersion, $db, $hostlist, $clusterName, $services, $gangliaMaster) {
+    if ($hadoopVersion == AMBARI_HADOOP_1) {
+      $db->addHostsToComponent($clusterName, "TASKTRACKER", $hostlist, "ASSIGNED", "");
+      $db->addHostsToComponent($clusterName, "DATANODE", $hostlist, "ASSIGNED", "");
+      if (array_key_exists("HBASE", $services)) {
+        $db->addHostsToComponent($clusterName, "HBASE_REGIONSERVER", $hostlist, "ASSIGNED", "");
+      }
+      if (array_key_exists("GANGLIA", $services)) {
+        if (sizeof($hostlist) > 0) {
+          $db->addHostsToComponent($clusterName, "GANGLIA_MONITOR", $hostlist, "ASSIGNED", "");
+        }
+      }
+    } else if ($hadoopVersion == AMBARI_HADOOP_2) {
+      $db->addHostsToComponent($clusterName, "NODEMANAGER", $hostlist, "ASSIGNED", "");
+      $db->addHostsToComponent($clusterName, "DATANODE2", $hostlist, "ASSIGNED", "");
+      if (array_key_exists("HBASE2", $services)) {
+        $db->addHostsToComponent($clusterName, "HBASE2_REGIONSERVER", $hostlist, "ASSIGNED", "");
+      }
+      if (array_key_exists("GANGLIA2", $services)) {
+        if (sizeof($hostlist) > 0) {
+          $db->addHostsToComponent($clusterName, "GANGLIA2_MONITOR", $hostlist, "ASSIGNED", "");
+        }
       }
     }
   }
@@ -200,17 +240,31 @@
     }
     if ($numNodes <= 5) {
       /* all slaves except for the namenode */
-      $excludeList = $this->getExcludeHosts($allHosts, $masterToHost["NAMENODE"]);
+      if (array_key_exists("HDFS", $services)) {
+        $excludeList = $this->getExcludeHosts($allHosts, $masterToHost["NAMENODE"]);
+      }
+      if (array_key_exists("HDFS2", $services)) {
+        $excludeList = $this->getExcludeHosts($allHosts, $masterToHost["NAMENODE2"]);
+      }
       return $excludeList;
     }
     if ($numNodes > 5) {
       /* all slaves except for the namenode/JT/Hbase master */
       $excludeHosts = array();
-      array_push($excludeHosts, $masterToHost["NAMENODE"][0]);
-      array_push($excludeHosts, $masterToHost["JOBTRACKER"][0]);
+      if (array_key_exists("HDFS", $services)) {
+        array_push($excludeHosts, $masterToHost["NAMENODE"][0]);
+        array_push($excludeHosts, $masterToHost["JOBTRACKER"][0]);
+      }
+      if (array_key_exists("HDFS2", $services)) {
+        array_push($excludeHosts, $masterToHost["NAMENODE2"][0]);
+        array_push($excludeHosts, $masterToHost["RESOURCEMANAGER"][0]);
+      }
       if (array_key_exists("HBASE", $services)) {
         array_push($excludeHosts, $masterToHost["HBASE_MASTER"][0]);
       }
+      if (array_key_exists("HBASE2", $services)) {
+        array_push($excludeHosts, $masterToHost["HBASE2_MASTER"][0]);
+      }
       $excludeList = $this->getExcludeHosts($allHosts, $excludeHosts);
       return $excludeList;
     }
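
The exclude list computed above is, in effect, the set difference between all discovered hosts and the hosts already carrying masters. A minimal sketch of the HDP-2 case with plain hostname strings, assuming $allHosts is a flat array of hostnames and using array_diff in place of the class's getExcludeHosts helper (whose implementation is not shown in this patch):

    // Illustrative: slave candidates = all hosts minus the chosen master hosts.
    $masterHosts = array();
    if (array_key_exists("HDFS2", $services)) {
      $masterHosts[] = $masterToHost["NAMENODE2"][0];
      $masterHosts[] = $masterToHost["RESOURCEMANAGER"][0];
    }
    if (array_key_exists("HBASE2", $services)) {
      $masterHosts[] = $masterToHost["HBASE2_MASTER"][0];
    }
    $slaveCandidates = array_values(array_diff($allHosts, $masterHosts));
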
@@ -244,6 +298,10 @@
     $return["error"] = "";
     $this->logger->log_info("Updating with Info from User: \n".$clusterName."\n "
         .print_r($masterToHost, true));
+
+    $stackVersion = $db->getHadoopStackVersion($clusterName);
+    $hadoopVersion = $stackVersion['version'];
+ 
     $allHostsDBInfo = $db->getAllHostsInfo($clusterName,
         array("=" => array ( "discoveryStatus" => "SUCCESS")), array());
     if ($allHostsDBInfo["result"] != 0) {
@@ -265,7 +323,7 @@
     $services = $this->filterEnabledServices($services_tmp);
     $clusterInfo = $db->getClusterState($clusterName);
     if ($clusterInfo["result"] != 0) {
-      $this->logger->log_error("Error getting cluster state ". $clusterInfo["error"]);
+      $this->logger->log_error("Error getting cluster state ".$clusterInfo["error"]);
       $return["result"] = $clusterInfo["result"];
       $return["error"] = $clusterInfo["error"];
       return $return;
@@ -285,10 +343,10 @@
     foreach($masterToHost as $componentName=>$hostNames) {
       $this->logger->log_info("For cluster  $clusterName setting $componentName to host \n". print_r($hostNames, true));
       $db->addHostsToComponent($clusterName, $componentName, $hostNames, "ASSIGNED", "");
-      if ($componentName == "GANGLIA_MONITOR_SERVER") {
+      if ($componentName == "GANGLIA_MONITOR_SERVER" || $componentName == "GANGLIA2_MONITOR_SERVER") {
         $gangliaMaster = $hostNames[0];
       }
-      if ($componentName == "ZOOKEEPER_SERVER") { 
+      if ($hadoopVersion == AMBARI_HADOOP_1 && $componentName == "ZOOKEEPER_SERVER") { 
         $sizeHosts = sizeof($hostNames);
         if ($sizeHosts == 1) {
           $hostConfig = array ( "ZOOKEEPER_SERVER" => array( $hostNames[0] => array ( "myid" => 1 )));
@@ -301,6 +359,19 @@
           $db->updateHostRoleConfigs($clusterName, $hostConfig);
         }
       }
+      if ($hadoopVersion == AMBARI_HADOOP_2 && $componentName == "ZOOKEEPER2_SERVER") { 
+        $sizeHosts = sizeof($hostNames);
+        if ($sizeHosts == 1) {
+          $hostConfig = array ( "ZOOKEEPER2_SERVER" => array( $hostNames[0] => array ( "myid" => 1 )));
+          $db->updateHostRoleConfigs($clusterName, $hostConfig);
+        } else {
+          $hostConfig = array( "ZOOKEEPER2_SERVER" => array() );
+          for ($i=0; $i < 3; $i++) {
+            $hostConfig["ZOOKEEPER2_SERVER"][$hostNames[$i]] = array ( "myid" => $i+1 );
+          }
+          $db->updateHostRoleConfigs($clusterName, $hostConfig);
+        }
+      }
     }
     /** make sure ganglia is added to all the masters **/
     $masterHosts = array();
@@ -310,21 +381,29 @@
         $this->logger->log_debug("Adding host $hostName for GANGLIA_MONITOR");
       }
     }
-    $db->addHostsToComponent($clusterName, "GANGLIA_MONITOR", $masterHosts, "ASSIGNED", "");
+    if($hadoopVersion == AMBARI_HADOOP_1) {
+      $db->addHostsToComponent($clusterName, "GANGLIA_MONITOR", $masterHosts, "ASSIGNED", "");
+    } else if ($hadoopVersion == AMBARI_HADOOP_2) {
+      $db->addHostsToComponent($clusterName, "GANGLIA2_MONITOR", $masterHosts, "ASSIGNED", "");
+    }
 
     // add DASHBOARD component
     $dashhostName = strtolower(exec('hostname -f'));
     $db->addHostsToComponent($clusterName, "DASHBOARD" , array($dashhostName), "ASSIGNED", "");
 
     $slaveList = $this->getSlaveList($allHosts, $masterToHost, $services);
-    $this->logger->log_info("Slave List \n".print_r($slaveList, true));
-    $this->addSlaves($db, $slaveList, $clusterName, $services, $gangliaMaster);
+
+    $stackVersion = $db->getHadoopStackVersion($clusterName);
+    $hadoopVersion = $stackVersion['version'];
+    $this->logger->log_info("Slave List for HDP $hadoopVersion \n".print_r($slaveList, true));
+
+    $this->addSlaves($hadoopVersion, $db, $slaveList, $clusterName, $services, $gangliaMaster);
     /* pick a node for gateway */
     $gateway = $slaveList[0];
     //    print_r($services);
     foreach ($services as $key=>$s) {
       $serviceName = $s["serviceName"];
-      if ($serviceName != "GANGLIA" && $serviceName != "NAGIOS" && $serviceName != "MISCELLANEOUS" && $serviceName != "DASHBOARD") {
+      if ($serviceName != "GANGLIA" && $serviceName != "GANGLIA2" && $serviceName != "NAGIOS" && $serviceName != "NAGIOS2" && $serviceName != "MISCELLANEOUS" && $serviceName != "DASHBOARD") {
         $db->addHostsToComponent($clusterName, $serviceName."_CLIENT", array($gateway), "ASSIGNED", "");
       }
     }
@@ -396,76 +475,80 @@
     $monitorIndex = 0;
     $allHostsInfoExHMC = $this->excludeHMCHost($allHostsInfo);
     $this->logger->log_debug('num nodes='.$numNodes);  
+    
+    $stackVersion = $db->getHadoopStackVersion($clustername);
+    $hadoopVersion = $stackVersion['version'];
+
     if ( $numNodes == 1 ) {
-      $result = $this->addNameNode($services, $result, $allHostsInfo[0]);
-      $result = $this->addSNameNode($services, $result, $allHostsInfo[0]);
-      $result = $this->addJobTracker($services, $result, $allHostsInfo[0]);
-      $result = $this->addHBaseMaster($services, $result, $allHostsInfo[0]);
-      $result = $this->addOozieServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addHiveServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addTempletonServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addGangliaServer($result, $allHostsInfo[0]);
-      $result = $this->addNagiosServer($result, $allHostsInfo[0]);
+      $result = $this->addNameNode($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addSNameNode($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addResourceManager($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addHBaseMaster($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addOozieServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addHiveServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addTempletonServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addGangliaServer($hadoopVersion, $result, $allHostsInfo[0]);
+      $result = $this->addNagiosServer($hadoopVersion, $result, $allHostsInfo[0]);
       return $result;
     }
     if ( $numNodes < 3) {
-      $result = $this->addNameNode($services, $result, $allHostsInfo[0]);
-      $result = $this->addSNameNode($services, $result, $allHostsInfo[1]);
-      $result = $this->addJobTracker($services, $result, $allHostsInfo[1]);
-      $result = $this->addHBaseMaster($services, $result, $allHostsInfo[0]);
-      $result = $this->addOozieServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addHiveServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addTempletonServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addGangliaServer($result, $allHostsInfoExHMC[$monitorIndex]);
-      $result = $this->addNagiosServer($result, $allHostsInfoExHMC[$monitorIndex]); 
+      $result = $this->addNameNode($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addSNameNode($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addResourceManager($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addHBaseMaster($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addOozieServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addHiveServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addTempletonServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addGangliaServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]);
+      $result = $this->addNagiosServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]); 
       return $result;
     }
     if ( $numNodes <= 5) {
-      $result = $this->addNameNode($services, $result, $allHostsInfo[0]);
-      $result = $this->addSNameNode($services, $result, $allHostsInfo[1]);
-      $result = $this->addJobTracker($services, $result, $allHostsInfo[1]);
-      $result = $this->addHBaseMaster($services, $result, $allHostsInfo[0]);
-      $result = $this->addOozieServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addHiveServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addTempletonServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[2]);
-      $result = $this->addGangliaServer($result, $allHostsInfoExHMC[$monitorIndex]);
-      $result = $this->addNagiosServer($result, $allHostsInfoExHMC[$monitorIndex]); 
+      $result = $this->addNameNode($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addSNameNode($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addResourceManager($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addHBaseMaster($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addOozieServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addHiveServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addTempletonServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addGangliaServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]);
+      $result = $this->addNagiosServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]); 
       return $result;
     }
 
     if ( $numNodes <= 30) {
-      $result = $this->addNameNode($services, $result, $allHostsInfo[0]);
-      $result = $this->addSNameNode($services, $result, $allHostsInfo[1]);
-      $result = $this->addJobTracker($services, $result, $allHostsInfo[1]);
-      $result = $this->addHBaseMaster($services, $result, $allHostsInfo[2]);
-      $result = $this->addOozieServer($services, $result, $allHostsInfo[2]);
-      $result = $this->addHiveServer($services, $result, $allHostsInfo[2]);
-      $result = $this->addTempletonServer($services, $result, $allHostsInfo[2]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[2]);
-      $result = $this->addGangliaServer($result, $allHostsInfoExHMC[$monitorIndex]);
-      $result = $this->addNagiosServer($result, $allHostsInfoExHMC[$monitorIndex]);
+      $result = $this->addNameNode($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addSNameNode($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addResourceManager($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addHBaseMaster($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addOozieServer($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addHiveServer($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addTempletonServer($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addGangliaServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]);
+      $result = $this->addNagiosServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]);
       return $result;
     }
     if ( $numNodes > 30) {
-      $result = $this->addNameNode($services, $result, $allHostsInfo[0]);
-      $result = $this->addSNameNode($services, $result, $allHostsInfo[1]);
-      $result = $this->addJobTracker($services, $result, $allHostsInfo[2]);
-      $result = $this->addHBaseMaster($services, $result, $allHostsInfo[3]);
-      $result = $this->addOozieServer($services, $result, $allHostsInfo[3]);
-      $result = $this->addHiveServer($services, $result, $allHostsInfo[4]);
-      $result = $this->addTempletonServer($services, $result, $allHostsInfo[4]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[0]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[1]);
-      $result = $this->addZooKeeperServer($services, $result, $allHostsInfo[2]);
-      $result = $this->addGangliaServer($result, $allHostsInfoExHMC[$monitorIndex]);
-      $result = $this->addNagiosServer($result, $allHostsInfoExHMC[$monitorIndex]);
+      $result = $this->addNameNode($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addSNameNode($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addResourceManager($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addHBaseMaster($hadoopVersion, $services, $result, $allHostsInfo[3]);
+      $result = $this->addOozieServer($hadoopVersion, $services, $result, $allHostsInfo[3]);
+      $result = $this->addHiveServer($hadoopVersion, $services, $result, $allHostsInfo[4]);
+      $result = $this->addTempletonServer($hadoopVersion, $services, $result, $allHostsInfo[4]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[0]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[1]);
+      $result = $this->addZooKeeperServer($hadoopVersion, $services, $result, $allHostsInfo[2]);
+      $result = $this->addGangliaServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]);
+      $result = $this->addNagiosServer($hadoopVersion, $result, $allHostsInfoExHMC[$monitorIndex]);
       return $result;
     }
   }
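
Read as data, the tiers above place masters on fixed host indices; for the two larger tiers the policy is roughly the following (indices into $allHostsInfo; Ganglia and Nagios always land on $allHostsInfoExHMC[$monitorIndex]). This summary is illustrative only:

    // Illustrative: master placement by cluster size (index into $allHostsInfo).
    $placementByTier = array(
      "6-30 nodes" => array("NAMENODE" => 0, "SNAMENODE" => 1, "JOBTRACKER/RESOURCEMANAGER" => 1,
                            "HBASE_MASTER" => 2, "OOZIE_SERVER" => 2, "HIVE_SERVER" => 2,
                            "TEMPLETON_SERVER" => 2, "ZOOKEEPER_SERVER" => array(0, 1, 2)),
      ">30 nodes"  => array("NAMENODE" => 0, "SNAMENODE" => 1, "JOBTRACKER/RESOURCEMANAGER" => 2,
                            "HBASE_MASTER" => 3, "OOZIE_SERVER" => 3, "HIVE_SERVER" => 4,
                            "TEMPLETON_SERVER" => 4, "ZOOKEEPER_SERVER" => array(0, 1, 2)),
    );
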
diff --git a/hmc/php/util/suggestProperties.php b/hmc/php/util/suggestProperties.php
index 78e705b..523d833 100644
--- a/hmc/php/util/suggestProperties.php
+++ b/hmc/php/util/suggestProperties.php
@@ -203,7 +203,7 @@
             foreach ($svcInfo["components"] as $compName => $compInfo) {
               if (isset($compInfo["isClient"]) && $compInfo["isClient"]) {
                 $ignoredComponents[$compName] = TRUE;
-              } else if ($compName == "GANGLIA_MONITOR") {
+              } else if ($compName == "GANGLIA_MONITOR" || $compName == "GANGLIA2_MONITOR") {
                 $ignoredComponents[$compName] = TRUE;
               }
             }
@@ -248,24 +248,45 @@
       $result["configs"]["mapred_red_tasks_max"] = $numRed;
     }
 
+    $stackVersion = $db->getHadoopStackVersion($clusterName);
+    $hadoopVersion = $stackVersion['version'];
+
+    $nameNodeStr = "NAMENODE";
+    $sNameNodeStr = "SNAMENODE";
+    $computeMasterStr = "JOBTRACKER";
+    $hBaseMasterStr = "HBASE_MASTER";
+    $computeSlaveStr = "TASKTRACKER";
+    $hdfsSlaveStr = "DATANODE";
+    $hbaseSlaveStr = "HBASE_REGIONSERVER";
+
+    if ($hadoopVersion == AMBARI_HADOOP_2) { 
+      $nameNodeStr = "NAMENODE2";
+      $sNameNodeStr = "SNAMENODE2";
+      $computeMasterStr = "RESOURCEMANAGER";
+      $hBaseMasterStr = "HBASE2_MASTER";
+      $computeSlaveStr = "NODEMANAGER";
+      $hdfsSlaveStr = "DATANODE2";
+      $hbaseSlaveStr = "HBASE2_REGIONSERVER";
+    }
+
     /* suggest memory for all the needed master daemons */
     /* assume MR and HDFS are always selected */
-    $nnHeap = $this->allocateHeapSizeForDaemon("NAMENODE", $hostRoles,
+    $nnHeap = $this->allocateHeapSizeForDaemon($nameNodeStr, $hostRoles,
         $hostInfoMap, $allHostsToComponents, FALSE);
     $result["configs"]["namenode_heapsize"] = $nnHeap;
 
     /* suggest the jt heap size */
-    $jtHeap = $this->allocateHeapSizeForDaemon("JOBTRACKER", $hostRoles,
+    $jtHeap = $this->allocateHeapSizeForDaemon($computeMasterStr, $hostRoles,
         $hostInfoMap, $allHostsToComponents, FALSE);
     $result["configs"]["jtnode_heapsize"] = $jtHeap;
 
     /* check if HBase is installed and then pick */
-    if (array_key_exists("HBASE", $services)) {
-      $hbaseHeap = $this->allocateHeapSizeForDaemon("HBASE_MASTER", $hostRoles,
+    if (array_key_exists("HBASE", $services) || array_key_exists("HBASE2", $services)) {
+      $hbaseHeap = $this->allocateHeapSizeForDaemon($hBaseMasterStr, $hostRoles,
           $hostInfoMap, $allHostsToComponents, FALSE);
       $result["configs"]["hbase_master_heapsize"] = $hbaseHeap;
     }
-    $heapSize = $this->allocateHeapSizeWithMax("DATANODE", $hostRoles,
+    $heapSize = $this->allocateHeapSizeWithMax($hdfsSlaveStr, $hostRoles,
         $hostInfoMap, $allHostsToComponents, TRUE, 2048);
     // cap the datanode heap size and hadoop heap size
     $result["configs"]["dtnode_heapsize"] = $heapSize;
@@ -273,15 +294,16 @@
 
     // TODO fix - this should be based on heap size divided by max task
     // limit on the host
-    $heapSize = $this->allocateHeapSizeForDaemon("TASKTRACKER", $hostRoles,
+    $heapSize = $this->allocateHeapSizeForDaemon($computeSlaveStr, $hostRoles,
         $hostInfoMap, $allHostsToComponents, TRUE);
-    $heapSizeWithMax = $this->allocateHeapSizeWithMax("TASKTRACKER", $hostRoles,
-        $hostInfoMap, $allHostsToComponents, TRUE, 2048);
-    $this->logger->log_info("Maxed Heap Size for MR Child opts ".$heapSizeWithMax);
-    $result["configs"]["mapred_child_java_opts_sz"] = $heapSizeWithMax;
+    $mrHeapSizeWithMax = $this->allocateHeapSizeWithMax($computeSlaveStr, $hostRoles,
+        $hostInfoMap, $allHostsToComponents, TRUE, 1024);
+    
+    $this->logger->log_info("Maxed Heap Size for MR Child opts ".$mrHeapSizeWithMax);
+    $result["configs"]["mapred_child_java_opts_sz"] = $mrHeapSizeWithMax;
 
-    if (array_key_exists("HBASE", $services)) {
-      $heapSize = $this->allocateHeapSizeForDaemon("HBASE_REGIONSERVER", $hostRoles,
+    if (array_key_exists("HBASE", $services) || array_key_exists("HBASE2", $services)) {
+      $heapSize = $this->allocateHeapSizeForDaemon($hbaseSlaveStr, $hostRoles,
           $hostInfoMap, $allHostsToComponents, FALSE);
       $result["configs"]["hbase_regionserver_heapsize"] = $heapSize;
     }
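
The heap-size suggestions now pick the role name by stack version before calling the existing allocate helpers. The same selection could be written as a single lookup table; a compact sketch inside the same class, assuming AMBARI_HADOOP_1 and AMBARI_HADOOP_2 resolve to distinct scalar values (the $rolesByVersion array is illustrative, not part of the patch):

    // Illustrative: version-keyed role names feeding the heap-size helpers.
    $rolesByVersion = array(
      AMBARI_HADOOP_1 => array("namenode" => "NAMENODE",  "computeMaster" => "JOBTRACKER",
                               "hdfsSlave" => "DATANODE",  "computeSlave" => "TASKTRACKER",
                               "hbaseMaster" => "HBASE_MASTER",  "hbaseSlave" => "HBASE_REGIONSERVER"),
      AMBARI_HADOOP_2 => array("namenode" => "NAMENODE2", "computeMaster" => "RESOURCEMANAGER",
                               "hdfsSlave" => "DATANODE2", "computeSlave" => "NODEMANAGER",
                               "hbaseMaster" => "HBASE2_MASTER", "hbaseSlave" => "HBASE2_REGIONSERVER"),
    );
    $roles = $rolesByVersion[$hadoopVersion];
    $nnHeap = $this->allocateHeapSizeForDaemon($roles["namenode"], $hostRoles,
        $hostInfoMap, $allHostsToComponents, FALSE);
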
@@ -422,19 +444,41 @@
       }
     }
 
-    $memProps = array (
-        "namenode_heapsize" => array ( "role" => "NAMENODE", "32bit" => FALSE),
-        "jtnode_heapsize" => array ( "role" => "JOBTRACKER", "32bit" => FALSE),
-        "dtnode_heapsize" => array ( "role" => "DATANODE", "32bit" => TRUE),
-        "hadoop_heapsize" => array ( "role" => "DATANODE", "32bit" => TRUE),
-        "mapred_child_java_opts_sz" => array ( "role" => "TASKTRACKER", "32bit" => TRUE)
-      );
+    $stackVersion = $db->getHadoopStackVersion($clusterName);
+    $hadoopVersion = $stackVersion['version'];
 
-    if (array_key_exists("HBASE", $services)) {
+    $memProps = array();
+    if ($hadoopVersion == AMBARI_HADOOP_1) { 
+      $memProps = array (
+          "namenode_heapsize" => array ( "role" => "NAMENODE", "32bit" => FALSE),
+          "jtnode_heapsize" => array ( "role" => "JOBTRACKER", "32bit" => FALSE),
+          "dtnode_heapsize" => array ( "role" => "DATANODE", "32bit" => TRUE),
+          "hadoop_heapsize" => array ( "role" => "DATANODE", "32bit" => TRUE),
+          "mapred_child_java_opts_sz" => array ( "role" => "TASKTRACKER", "32bit" => TRUE)
+        );
+    } else if ($hadoopVersion == AMBARI_HADOOP_2) { 
+      $memProps = array (
+          "namenode_heapsize" => array ( "role" => "NAMENODE2", "32bit" => FALSE),
+          "resourcemanager_heapsize" => array ( "role" => "RESOURCEMANAGER", "32bit" => FALSE),
+          "dtnode_heapsize" => array ( "role" => "DATANODE2", "32bit" => TRUE),
+          "hadoop_heapsize" => array ( "role" => "DATANODE2", "32bit" => TRUE)
+        );
+    }
+
+    $hBaseStr = "HBASE";
+    $hBaseMasterStr = "HBASE_MASTER";
+    $hbaseSlaveStr = "HBASE2_REGIONSERVER";
+    if ($hadoopVersion == "AMBARI_HADOOP_2") {
+      $hBaseStr = "HBASE2";
+      $hBaseMasterStr = "HBASE2_MASTER";
+      $hbaseSlaveStr = "HBASE2_REGIONSERVER";
+    }
+
+    if (array_key_exists($hBaseStr, $services)) {
       $memProps["hbase_master_heapsize"] =
-          array ( "role" => "HBASE_MASTER", "32bit" => FALSE);
+          array ( "role" => $hBaseMasterStr, "32bit" => FALSE);
       $memProps["hbase_regionserver_heapsize"] =
-          array ( "role" => "HBASE_REGIONSERVER", "32bit" => FALSE);
+          array ( "role" => $hbaseSlaveStr, "32bit" => FALSE);
     }
 
     foreach ($memProps as $prop => $propInfo) {
diff --git a/hmc/puppet/modules/hdp-hadoop/manifests/params.pp b/hmc/puppet/modules/hdp-hadoop/manifests/params.pp
index 79ab848..efde1ec 100644
--- a/hmc/puppet/modules/hdp-hadoop/manifests/params.pp
+++ b/hmc/puppet/modules/hdp-hadoop/manifests/params.pp
@@ -58,15 +58,15 @@
   $namenode_opt_newsize = hdp_default("hadoop/hadoop-env/namenode_opt_newsize","640m")
   
   ### compression related
-  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
+  if (($hdp::params::mapreduce_lzo_enabled == true) and ($hdp::params::mapreduce_snappy_enabled == true)) {
     $mapred_compress_map_output = true
     $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
     $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::snappy_enabled == true) {
+  } elsif ($hdp::params::mapreduce_snappy_enabled == true) {
     $mapred_compress_map_output = true
     $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
     $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::lzo_enabled == true) {
+  } elsif ($hdp::params::mapreduce_lzo_enabled == true) {
     $mapred_compress_map_output = true
     $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
     $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
diff --git a/hmc/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb b/hmc/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
index 68c9ecd..d73589b 100644
--- a/hmc/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
+++ b/hmc/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
@@ -40,14 +40,14 @@
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/log/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/log/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/log/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/log/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx1024m -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/log/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/log/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
diff --git a/hmc/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb b/hmc/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
index f1d7847..db771f2 100644
--- a/hmc/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
+++ b/hmc/puppet/modules/hdp-hbase/templates/hbase-env.sh.erb
@@ -45,7 +45,7 @@
 #
 # export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
 export HBASE_MASTER_OPTS="-Xmx<%=scope.function_hdp_template_var("hbase_master_heapsize")%>"
-export HBASE_REGIONSERVER_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseParNewGC -Xmn<%=scope.function_hdp_template_var("hbase_regionserver_xmn_size")%> -XX:CMSInitiatingOccupancyFraction=80 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:<%=scope.function_hdp_template_var("hbase_log_dir")%>/log/$USER/gc.log-`date +'%Y%m%d%H%M'` -Xms<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -Xmx<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -XX:ErrorFile=<%=scope.function_hdp_template_var("hbase_log_dir")%>/log/$USER/hs_err_pid%p.log"
+export HBASE_REGIONSERVER_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseParNewGC -Xmn<%=scope.function_hdp_template_var("hbase_regionserver_xmn_size")%> -XX:CMSInitiatingOccupancyFraction=80 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:<%=scope.function_hdp_template_var("hbase_log_dir")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -Xms<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -Xmx<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -XX:ErrorFile=<%=scope.function_hdp_template_var("hbase_log_dir")%>/$USER/hs_err_pid%p.log"
 # export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
 # export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
 
diff --git a/hmc/puppet/modules/hdp-hive/manifests/params.pp b/hmc/puppet/modules/hdp-hive/manifests/params.pp
index 5e77e38..4c72326 100644
--- a/hmc/puppet/modules/hdp-hive/manifests/params.pp
+++ b/hmc/puppet/modules/hdp-hive/manifests/params.pp
@@ -55,5 +55,5 @@
   ###mysql connector
   $download_url = $hdp::params::gpl_artifacts_download_url
   $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
-  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-0.4.0.14.jar")
+  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/hcatalog-0.4.0.15.jar")
 }
diff --git a/hmc/puppet/modules/hdp-hive/manifests/service.pp b/hmc/puppet/modules/hdp-hive/manifests/service.pp
index e7b5ff4..f54e1f8 100644
--- a/hmc/puppet/modules/hdp-hive/manifests/service.pp
+++ b/hmc/puppet/modules/hdp-hive/manifests/service.pp
@@ -35,8 +35,8 @@
     $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
   } elsif ($ensure == 'stopped') {
     #TODO: this needs to be fixed
-    $daemon_cmd = "ps aux | awk '{print \$1,\$2}' | grep ${user} | awk '{print \$2}' | xargs kill >/dev/null 2>&1"
-    $no_op_test = "ps aux | grep -i [h]ive"
+    $daemon_cmd = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \" | awk '{print \$2}' | xargs kill >/dev/null 2>&1"
+    $no_op_test = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \""
   } else {
     $daemon_cmd = undef
   }
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_aggregate.php b/hmc/puppet/modules/hdp-nagios/files/check_aggregate.php
index 62ad74d..f4063fb 100755
--- a/hmc/puppet/modules/hdp-nagios/files/check_aggregate.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_aggregate.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_datanode_storage.php b/hmc/puppet/modules/hdp-nagios/files/check_datanode_storage.php
index 46dba72..079d904 100644
--- a/hmc/puppet/modules/hdp-nagios/files/check_datanode_storage.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_datanode_storage.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php b/hmc/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
index a9d57a3..bc34fe8 100755
--- a/hmc/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_hdfs_blocks.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php b/hmc/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
index 8167e3a..31ed463 100755
--- a/hmc/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_hdfs_capacity.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_name_dir_status.php b/hmc/puppet/modules/hdp-nagios/files/check_name_dir_status.php
index 61356a3..d2bf843 100755
--- a/hmc/puppet/modules/hdp-nagios/files/check_name_dir_status.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_name_dir_status.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_oozie_status.sh b/hmc/puppet/modules/hdp-nagios/files/check_oozie_status.sh
index 70b73c5..f22da8b 100755
--- a/hmc/puppet/modules/hdp-nagios/files/check_oozie_status.sh
+++ b/hmc/puppet/modules/hdp-nagios/files/check_oozie_status.sh
@@ -20,6 +20,7 @@
 #
 #
 # OOZIE_URL is of the form http://<hostname>:<port>/oozie
+# e.g. OOZIE_URL: http://hortonworks-sandbox.localdomain:11000/oozie
 HOST=$1
 PORT=$2
 JAVA_HOME=$3
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_puppet_agent_status.php b/hmc/puppet/modules/hdp-nagios/files/check_puppet_agent_status.php
index 0e8c661..b9a3565 100644
--- a/hmc/puppet/modules/hdp-nagios/files/check_puppet_agent_status.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_puppet_agent_status.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/files/check_rpcq_latency.php b/hmc/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
index 973a12d..9ed07cc 100755
--- a/hmc/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
+++ b/hmc/puppet/modules/hdp-nagios/files/check_rpcq_latency.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/hmc/puppet/modules/hdp-nagios/manifests/server.pp b/hmc/puppet/modules/hdp-nagios/manifests/server.pp
index 00c9ace..d135582 100644
--- a/hmc/puppet/modules/hdp-nagios/manifests/server.pp
+++ b/hmc/puppet/modules/hdp-nagios/manifests/server.pp
@@ -38,6 +38,11 @@
       unless => "test ! -e  /var/nagios/rw/nagios.cmd"
     }
 
+    hdp::exec { "rm -rf /tmp/hadoop-nagios" :
+      command => "rm -rf /tmp/hadoop-nagios",
+      unless => "test ! -e  /tmp/hadoop-nagios"
+    }
+
     hdp::directory { $nagios_config_dir:
       service_state => $service_state,
       force => true
diff --git a/hmc/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb b/hmc/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
index 9392662..ca712c7 100644
--- a/hmc/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
+++ b/hmc/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
@@ -29,27 +29,27 @@
 # Check data node storage full 
 define command {
         command_name    check_datanode_storage
-        command_line    $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
+        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
        }
 
 define command{
         command_name    check_hdfs_blocks
-        command_line    $USER1$/check_hdfs_blocks.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
+        command_line    php $USER1$/check_hdfs_blocks.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
        }
 
 define command{
         command_name    check_hdfs_capacity
-        command_line    $USER1$/check_hdfs_capacity.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
+        command_line    php $USER1$/check_hdfs_capacity.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
        }
 
 define command{
         command_name    check_aggregate
-        command_line    $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
        }
 
 define command{
         command_name    check_rpcq_latency
-        command_line    $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$
+        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$
        }
 
 define command{
@@ -64,12 +64,12 @@
 
 define command{
         command_name    check_name_dir_status
-        command_line    $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$
+        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$
        }
 
 define command{
         command_name    check_puppet_agent_status
-        command_line    $USER1$/check_puppet_agent_status.php -h $HOSTADDRESS$
+        command_line    php $USER1$/check_puppet_agent_status.php -h $HOSTADDRESS$
        }
 
 define command{
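
With the shebang lines removed from the PHP plugins, Nagios now invokes them explicitly through the php interpreter, as in the command_line changes above. A minimal skeleton of such a CLI check, illustrative only and not one of the actual plugins:

    <?php
    // Illustrative Nagios-style plugin, invoked as: php check_example.php -h <host> -p <port> -w <warn> -c <crit>
    $options = getopt("h:p:w:c:");
    if (!isset($options["h"]) || !isset($options["p"])) {
      echo "UNKNOWN: usage -h <host> -p <port> [-w warn] [-c crit]\n";
      exit(3);  // Nagios UNKNOWN
    }
    // ... query the daemon at $options["h"]:$options["p"] and compare against the thresholds ...
    echo "OK: check passed\n";
    exit(0);    // Nagios OK
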
diff --git a/hmc/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp b/hmc/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
index e49418a..e592141 100644
--- a/hmc/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
+++ b/hmc/puppet/modules/hdp-oozie/manifests/download-ext-zip.pp
@@ -22,24 +22,6 @@
 {
   include hdp-oozie::params
 
-  $zip_name = $hdp-oozie::params::ext_zip_name
-  $target = "${hdp::params::artifact_dir}/${zip_name}"
-
-  anchor { 'hdp-oozie::download-ext-zip::begin':}
-
-   hdp::package { 'extjs' :
-     require   => Anchor['hdp-oozie::download-ext-zip::begin']
-   }
-
-   hdp::exec { 'mkdir -p ${artifact_dir} ;  cp /tmp/HDP-oozie/${zip_name} ${target}':
-       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-oozie/${zip_name} ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['extjs'],
-       notify  =>  Anchor['hdp-oozie::download-ext-zip::end'],
-   }
-
-   anchor { 'hdp-oozie::download-ext-zip::end':}
+  hdp::package { 'extjs' : }
 
 }
diff --git a/hmc/puppet/modules/hdp-oozie/manifests/params.pp b/hmc/puppet/modules/hdp-oozie/manifests/params.pp
index f8b6d15..0f46e15 100644
--- a/hmc/puppet/modules/hdp-oozie/manifests/params.pp
+++ b/hmc/puppet/modules/hdp-oozie/manifests/params.pp
@@ -26,6 +26,7 @@
   $download_url = $hdp::params::gpl_artifacts_download_url
   $ext_zip_url = "${download_url}/ext-2.2.zip"
   $ext_zip_name = hdp_default("ext_zip_name","ext-2.2.zip")
+  $ext_zip_dir = hdp_default("ext_zip_dir","/usr/share/HDP-oozie/")
 
   ### oozie-env
   $conf_dir = $hdp::params::oozie_conf_dir
diff --git a/hmc/puppet/modules/hdp-oozie/manifests/service.pp b/hmc/puppet/modules/hdp-oozie/manifests/service.pp
index 82e277a..9bcc7fe 100644
--- a/hmc/puppet/modules/hdp-oozie/manifests/service.pp
+++ b/hmc/puppet/modules/hdp-oozie/manifests/service.pp
@@ -32,7 +32,7 @@
   $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/oozie_server.sh"
   $pid_file = "${hdp-oozie::params::oozie_pid_dir}/oozie.pid" 
   $jar_location = $hdp::params::hadoop_jar_location
-  $ext_js_path = "/tmp/HDP-artifacts/${hdp-oozie::params::ext_zip_name}"
+  $ext_js_path = "${hdp-oozie::params::ext_zip_dir}/${hdp-oozie::params::ext_zip_name}"
 
   if ($ensure == 'running') {
     $daemon_cmd = "su - ${user} -c  'cd ${oozie_tmp} ; /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path; /usr/lib/oozie/bin/oozie-start.sh'"
diff --git a/hmc/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp b/hmc/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
index e972313..94618fd 100644
--- a/hmc/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
+++ b/hmc/puppet/modules/hdp-templeton/manifests/download-hive-tar.pp
@@ -22,25 +22,6 @@
 {
   include hdp-templeton::params
 
-  $src_tar_name = $hdp-templeton::params::src_hive_tar_name
-  $dest_tar_name = $hdp-templeton::params::dest_hive_tar_name
-  $target = "${hdp::params::artifact_dir}/${dest_tar_name}"
- 
-  anchor { 'hdp-templeton::download-hive-tar::begin':}         
-
-   hdp::package { 'templeton-tar-hive' :
-     require   => Anchor['hdp-templeton::download-hive-tar::begin']                                                              
-   }
-  
-   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['templeton-tar-hive'],
-       notify  =>  Anchor['hdp-templeton::download-hive-tar::end'],
-   }
-
-   anchor { 'hdp-templeton::download-hive-tar::end':}       
+  hdp::package { 'templeton-tar-hive' : }
 
 }
diff --git a/hmc/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp b/hmc/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
index e44ce7c..cf32440 100644
--- a/hmc/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
+++ b/hmc/puppet/modules/hdp-templeton/manifests/download-pig-tar.pp
@@ -22,25 +22,6 @@
 {
   include hdp-templeton::params
 
-  $src_tar_name = $hdp-templeton::params::src_pig_tar_name
-  $dest_tar_name = $hdp-templeton::params::dest_pig_tar_name
-  $target = "${hdp::params::artifact_dir}/${dest_tar_name}"
-
-  anchor { 'hdp-templeton::download-pig-tar::begin':}
-
-   hdp::package { 'templeton-tar-pig' :
-     require   => Anchor['hdp-templeton::download-pig-tar::begin']
-   }
-
-   hdp::exec { 'pig ; mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}':
-       command => "mkdir -p ${artifact_dir} ;  cp /tmp/HDP-templeton/${src_tar_name} ${target}",
-       unless  => "test -f ${target}",
-       creates => $target,
-       path    => ["/bin","/usr/bin/"],
-       require => Hdp::Package['templeton-tar-pig'],
-       notify  =>  Anchor['hdp-templeton::download-pig-tar::end'],
-   }
-
-   anchor { 'hdp-templeton::download-pig-tar::end':}
+  hdp::package { 'templeton-tar-pig' : }
 
 }
diff --git a/hmc/puppet/modules/hdp-templeton/manifests/params.pp b/hmc/puppet/modules/hdp-templeton/manifests/params.pp
index 034d19b..e26c057 100644
--- a/hmc/puppet/modules/hdp-templeton/manifests/params.pp
+++ b/hmc/puppet/modules/hdp-templeton/manifests/params.pp
@@ -25,10 +25,10 @@
   ###pig and hive tar url connector
   $download_url = $hdp::params::apache_artifacts_download_url
 
-  $dest_pig_tar_name = hdp_default("dest_pig_tar_name","pig.tar.gz")
-  $dest_hive_tar_name = hdp_default("dest_hive_tar_name","hive.tar.gz")
-  $src_pig_tar_name = hdp_default("src_pig_tar_name","pig-0.9.2.14.tar.gz")
-  $src_hive_tar_name = hdp_default("src_hive_tar_name","hive-0.9.0.14.tar.gz")
+  $pig_tar_dir = hdp_default("pig_tar_dir","/usr/share/HDP-templeton/")
+  $hive_tar_dir = hdp_default("hive_tar_dir","/usr/share/HDP-templeton/")
+  $pig_tar_name = hdp_default("pig_tar_name","pig-0.9.2.15.tar.gz")
+  $hive_tar_name = hdp_default("hive_tar_name","hive-0.9.0.15.tar.gz")
 
   ### templeton-env
   $conf_dir = hdp_default("hadoop/templeton-env/conf_dir","/etc/templeton")
@@ -38,20 +38,20 @@
 
   $templeton_pid_dir = hdp_default("hadoop/templeton-env/templeton_pid_dir","/var/run/templeton")
 
-  $templeton_jar_name= hdp_default("hadoop/templeton-env/templeton_jar_name","templeton-0.1.4.14.jar")
+  $templeton_jar_name= hdp_default("hadoop/templeton-env/templeton_jar_name","templeton-0.1.4.15.jar")
  
   $hadoop_prefix = hdp_default("hadoop/templeton-env/hadoop_prefix","/usr")
   $hive_prefix = hdp_default("hadoop/templeton-env/hive_prefix","/usr")
   
   ### templeton-site
   $hadoop_conf_dir = hdp_default("hadoop/templeton-site/hadoop_conf_dir")
-  $templeton_jar = hdp_default("hadoop/templeton-site/templeton_jar","/usr/share/templeton/templeton-0.1.4.14.jar")
+  $templeton_jar = hdp_default("hadoop/templeton-site/templeton_jar","/usr/share/templeton/templeton-0.1.4.15.jar")
   $zookeeper_jar = hdp_default("hadoop/templeton-site/zookeeper_jar","/usr/lib/zookeeper/zookeeper.jar")
-  $pig_tar_gz = hdp_default("hadoop/templeton-site/pig_tar_gz","$dest_pig_tar_name")
-  $pig_tar_name_hdfs = hdp_default("hadoop/templeton-site/pig_tar_name_hdfs","pig-0.9.2.14")
+  $pig_tar_gz = hdp_default("hadoop/templeton-site/pig_tar_gz","$pig_tar_name")
+  $pig_tar_name_hdfs = hdp_default("hadoop/templeton-site/pig_tar_name_hdfs","pig-0.9.2.15")
 
-  $hive_tar_gz = hdp_default("hadoop/templeton-site/hive_tar_gz","$dest_hive_tar_name")
-  $hive_tar_gz_name = hdp_default("hadoop/templeton-site/hive_tar_gz_name","hive-0.9.0.14")
+  $hive_tar_gz = hdp_default("hadoop/templeton-site/hive_tar_gz","$hive_tar_name")
+  $hive_tar_gz_name = hdp_default("hadoop/templeton-site/hive_tar_gz_name","hive-0.9.0.15")
   $hive_metastore_sasl_enabled = hdp_default("hadoop/templeton-site/hive_metastore_sasl_enabled","no")
 
   $templeton_metastore_principal = hdp_default("hadoop/templeton-site/templeton_metastore_principal")
diff --git a/hmc/puppet/modules/hdp-templeton/manifests/server.pp b/hmc/puppet/modules/hdp-templeton/manifests/server.pp
index 4180bfc..0571189 100644
--- a/hmc/puppet/modules/hdp-templeton/manifests/server.pp
+++ b/hmc/puppet/modules/hdp-templeton/manifests/server.pp
@@ -74,8 +74,7 @@
 
 class hdp-templeton::copy-hdfs-directories($service_state)
 {
- $templeton_user = $hdp-templeton::params::templeton_user
- $pig_src_tar = "$hdp::params::artifact_dir/pig.tar.gz"
+  $templeton_user = $hdp-templeton::params::templeton_user
 
   hdp-hadoop::hdfs::copyfromlocal { '/usr/share/templeton/templeton*jar':
     service_state => $service_state,
@@ -84,23 +83,21 @@
     dest_dir => '/apps/templeton/ugi.jar'
   }
   hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
-   service_state => $service_state,
-   owner => $hdp-templeton::params::templeton_user,
-   mode  => '755',
-   dest_dir => '/apps/templeton/hadoop-streaming.jar'
- }
-  #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::pig_tar_name} instead
-  hdp-hadoop::hdfs::copyfromlocal { '/tmp/HDP-artifacts/pig.tar.gz' :
     service_state => $service_state,
     owner => $hdp-templeton::params::templeton_user,
     mode  => '755',
-    dest_dir => '/apps/templeton/pig.tar.gz'
+    dest_dir => '/apps/templeton/hadoop-streaming.jar'
   }
-  #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::hive_tar_name} instead
-  hdp-hadoop::hdfs::copyfromlocal { '/tmp/HDP-artifacts/hive.tar.gz' :
+  hdp-hadoop::hdfs::copyfromlocal { "${hdp-templeton::params::pig_tar_dir}/${hdp-templeton::params::pig_tar_name}" :
     service_state => $service_state,
     owner => $hdp-templeton::params::templeton_user,
     mode  => '755',
-    dest_dir => '/apps/templeton/hive.tar.gz'
+    dest_dir => "/apps/templeton/${hdp-templeton::params::pig_tar_name}"
+  }
+  hdp-hadoop::hdfs::copyfromlocal { "${hdp-templeton::params::hive_tar_dir}/${hdp-templeton::params::hive_tar_name}" :
+    service_state => $service_state,
+    owner => $hdp-templeton::params::templeton_user,
+    mode  => '755',
+    dest_dir => "/apps/templeton/${hdp-templeton::params::hive_tar_name}"
   }
 }
diff --git a/hmc/puppet/modules/hdp/manifests/params.pp b/hmc/puppet/modules/hdp/manifests/params.pp
index 590d655..2b88449 100644
--- a/hmc/puppet/modules/hdp/manifests/params.pp
+++ b/hmc/puppet/modules/hdp/manifests/params.pp
@@ -124,15 +124,15 @@
   $host_address = undef 
 
   ##### java 
-  $java32_home = hdp_default("java32_home","/usr/jdk32/jdk1.6.0_26")
-  $java64_home = hdp_default("java64_home","/usr/jdk64/jdk1.6.0_26")
+  $java32_home = hdp_default("java32_home","/usr/jdk32/jdk1.6.0_31")
+  $java64_home = hdp_default("java64_home","/usr/jdk64/jdk1.6.0_31")
   
   $wipeoff_data =  hdp_default("wipeoff_data",false) 
 
-  $jdk_location = hdp_default("jdk_location","http://download.oracle.com/otn-pub/java/jdk/6u26-b03")
+  $jdk_location = hdp_default("jdk_location","http://download.oracle.com/otn-pub/java/jdk/6u31-b03")
   $jdk_bins = hdp_default("jdk_bins",{
-    32 => "jdk-6u26-linux-i586.bin",
-    64 => "jdk-6u26-linux-x64.bin"
+    32 => "jdk-6u31-linux-i586.bin",
+    64 => "jdk-6u31-linux-x64.bin"
   })
   
   #####
@@ -270,10 +270,10 @@
       64 =>  ['extjs-2.2-1']
     },
     templeton-tar-hive => {
-      64 => ['templeton-tar-hive-0.0.1.14-1']
+      64 => ['templeton-tar-hive-0.0.1.15-1']
     },
     templeton-tar-pig => {
-      64 => ['templeton-tar-pig-0.0.1.14-1']
+      64 => ['templeton-tar-pig-0.0.1.15-1']
     }
   }
   $packages = 'bigtop' 
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/checkGmetad.sh b/hmc/puppet/modules/hdp2-ganglia/files/checkGmetad.sh
new file mode 100644
index 0000000..e60eb31
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/checkGmond.sh b/hmc/puppet/modules/hdp2-ganglia/files/checkGmond.sh
new file mode 100644
index 0000000..0cec8dc
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/checkRrdcached.sh b/hmc/puppet/modules/hdp2-ganglia/files/checkRrdcached.sh
new file mode 100644
index 0000000..d94db5d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/gmetad.init b/hmc/puppet/modules/hdp2-ganglia/files/gmetad.init
new file mode 100644
index 0000000..5e33ea3
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/gmetad.init
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL
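For reference, the hdp-gmetad init script above is driven like any other SysV service; the invocations below are illustrative only and simply exercise the start/stop/status cases defined in the case statement:

  /etc/init.d/hdp-gmetad start     # runs startGmetad.sh; touches /var/lock/subsys/hdp-gmetad on success
  /etc/init.d/hdp-gmetad status    # runs checkGmetad.sh; exits non-zero if gmetad is not running
  /etc/init.d/hdp-gmetad stop      # runs stopGmetad.sh and removes the lock file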
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/gmetadLib.sh b/hmc/puppet/modules/hdp2-ganglia/files/gmetadLib.sh
new file mode 100644
index 0000000..fa529af
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/gmetadLib.sh
@@ -0,0 +1,196 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port addreses2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed. 
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case
+# sensitive manner
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}
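For illustration, the data_source loop in generateGmetadConf above emits one line per configured Ganglia cluster; the cluster name, IP, and port in this sketch are hypothetical:

  # Hypothetical: for a cluster "HDPSlaves" whose master gmond listens on 10.0.0.5:8660,
  # the loop above would append the following line to gmetad.conf:
  #   data_source "HDPSlaves" 10.0.0.5:8660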
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/gmond.init b/hmc/puppet/modules/hdp2-ganglia/files/gmond.init
new file mode 100644
index 0000000..4bedd62
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/gmond.init
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/gmondLib.sh b/hmc/puppet/modules/hdp2-ganglia/files/gmondLib.sh
new file mode 100644
index 0000000..fe062cf
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/gmondLib.sh
@@ -0,0 +1,536 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  metric {
+    name = "mem_total"
+    title = "Memory Total"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+/* Different than 2.5.x default since the old config made no sense */
+collection_group {
+  collect_every = 1800
+  time_threshold = 3600
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+}
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/rrdcachedLib.sh b/hmc/puppet/modules/hdp2-ganglia/files/rrdcachedLib.sh
new file mode 100644
index 0000000..8b7c257
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/rrdcachedLib.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/setupGanglia.sh b/hmc/puppet/modules/hdp2-ganglia/files/setupGanglia.sh
new file mode 100644
index 0000000..93ea922
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/setupGanglia.sh
@@ -0,0 +1,128 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+
+while getopts ":c:mt" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi
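The option handling above boils down to three typical invocations; the cluster name here is hypothetical and shown only for illustration:

  ./setupGanglia.sh -c HDPSlaves       # generate slave gmond config for cluster "HDPSlaves"
  ./setupGanglia.sh -c HDPSlaves -m    # generate master gmond config for the same cluster
  ./setupGanglia.sh -t                 # generate gmetad config (-t takes precedence over -c)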
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/startGmetad.sh b/hmc/puppet/modules/hdp2-ganglia/files/startGmetad.sh
new file mode 100644
index 0000000..1cfd3f1
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/startGmetad.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        gmetadRunningPid=`getGmetadRunningPid`;
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/startGmond.sh b/hmc/puppet/modules/hdp2-ganglia/files/startGmond.sh
new file mode 100644
index 0000000..cf0e8f6
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/startGmond.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+  
+        gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/startRrdcached.sh b/hmc/puppet/modules/hdp2-ganglia/files/startRrdcached.sh
new file mode 100644
index 0000000..181acbf
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/startRrdcached.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    #changed because of a problem puppet had with the nobody user
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b /var/lib/ganglia/rrds -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
+    # this, but it doesn't take sometimes due to a lack of permissions,
+    # so perform the operation explicitly to be super-sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/stopGmetad.sh b/hmc/puppet/modules/hdp2-ganglia/files/stopGmetad.sh
new file mode 100644
index 0000000..2764e0e
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/stopGmond.sh b/hmc/puppet/modules/hdp2-ganglia/files/stopGmond.sh
new file mode 100644
index 0000000..01eb87c
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -HUP ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/stopRrdcached.sh b/hmc/puppet/modules/hdp2-ganglia/files/stopRrdcached.sh
new file mode 100644
index 0000000..0a0d8d8
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 
diff --git a/hmc/puppet/modules/hdp2-ganglia/files/teardownGanglia.sh b/hmc/puppet/modules/hdp2-ganglia/files/teardownGanglia.sh
new file mode 100644
index 0000000..b27f7a2
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/config.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/config.pp
new file mode 100644
index 0000000..ba09993
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/config.pp
@@ -0,0 +1,59 @@
+class hdp2-ganglia::config(
+  $ganglia_server_host = undef,
+  $service_state = $hdp2::params::cluster_service_state
+)
+{
+ if ($service_state in ['running','installed_and_configured','stopped']) {
+    #TODO: divide into what is needed on server vs what is needed on monitored nodes
+    $shell_cmds_dir = $hdp2-ganglia::params::ganglia_shell_cmds_dir
+    $shell_files = ['checkGmetad.sh','checkGmond.sh','checkRrdcached.sh','gmetadLib.sh','gmondLib.sh','rrdcachedLib.sh','setupGanglia.sh','startGmetad.sh','startGmond.sh','startRrdcached.sh','stopGmetad.sh','stopGmond.sh','stopRrdcached.sh','teardownGanglia.sh']
+
+    hdp2::directory_recursive_create { $shell_cmds_dir :
+      owner => root,
+      group => root
+    } 
+
+     hdp2-ganglia::config::init_file { ['gmetad','gmond']: }
+
+     hdp2-ganglia::config::shell_file { $shell_files: }                       
+
+     hdp2-ganglia::config::file { ['gangliaClusters.conf','gangliaEnv.sh','gangliaLib.sh']: 
+       ganglia_server_host => $ganglia_server_host
+     }
+ 
+     anchor{'hdp2-ganglia::config::begin':} -> Hdp2::Directory_recursive_create[$shell_cmds_dir] -> Hdp2-ganglia::Config::Shell_file<||> -> anchor{'hdp2-ganglia::config::end':}
+     Anchor['hdp2-ganglia::config::begin'] -> Hdp2-ganglia::Config::Init_file<||> -> Anchor['hdp2-ganglia::config::end']
+     Anchor['hdp2-ganglia::config::begin'] -> Hdp2-ganglia::Config::File<||> -> Anchor['hdp2-ganglia::config::end']
+  }
+}
+
+define hdp2-ganglia::config::shell_file()
+{
+  file { "${hdp2-ganglia::params::ganglia_shell_cmds_dir}/${name}":
+    source => "puppet:///modules/hdp2-ganglia/${name}", 
+    mode => '0755'
+  }
+}
+
+define hdp2-ganglia::config::init_file()
+{
+  file { "/etc/init.d/hdp-${name}":
+    source => "puppet:///modules/hdp2-ganglia/${name}.init", 
+    mode => '0755'
+  }
+}
+
+### config files
+define hdp2-ganglia::config::file(
+  $ganglia_server_host = undef
+)
+{
+  hdp2::configfile { "${hdp2-ganglia::params::ganglia_shell_cmds_dir}/${name}":
+    component           => 'ganglia',
+    owner               => root,
+    group               => root
+  }
+  if ($ganglia_server_host != undef) {
+    Hdp2::Configfile<||>{ganglia_server_host => $ganglia_server_host}
+  }
+}
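
For reference, the monitor and server classes later in this patch declare this class with an explicit Ganglia server host; a minimal standalone sketch (the hostname and service_state values here are placeholders):

  class { 'hdp2-ganglia::config':
    ganglia_server_host => 'ganglia-server.example.com',  # placeholder host
    service_state       => 'installed_and_configured'
  }
  # Stages the shell helpers under $ganglia_shell_cmds_dir, installs
  # /etc/init.d/hdp-gmetad and /etc/init.d/hdp-gmond, and renders
  # gangliaClusters.conf, gangliaEnv.sh and gangliaLib.sh into the same
  # directory.
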
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/config/generate_monitor.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/config/generate_monitor.pp
new file mode 100644
index 0000000..c0d8b6b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/config/generate_monitor.pp
@@ -0,0 +1,21 @@
+#TODO: the scripts called here should be converted to native puppet
+define hdp2-ganglia::config::generate_monitor(
+  $ganglia_service,
+  $role
+)
+{
+  $shell_cmds_dir = $hdp2-ganglia::params::ganglia_shell_cmds_dir
+  $cmd = $ganglia_service ? {
+    'gmond'  => $role ? {
+      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m",
+       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name}"
+    },
+    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t",
+     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
+  }
+
+  #TODO: put in test condition
+  hdp::exec { $cmd:
+    command => $cmd
+ }
+}
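
With the default ganglia_shell_cmds_dir of /usr/libexec/hdp/ganglia (see params.pp below), the declarations made by hdp2-ganglia::monitor::config-gen expand roughly as in this sketch:

  hdp2-ganglia::config::generate_monitor { 'HDPSlaves':
    ganglia_service => 'gmond',
    role            => 'monitor'
  }
  # role is not 'server', so the selector picks the default branch and the
  # wrapped hdp::exec runs:
  #   /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPSlaves
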
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/config/generate_server.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/config/generate_server.pp
new file mode 100644
index 0000000..aa1f8af
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/config/generate_server.pp
@@ -0,0 +1,22 @@
+#TODO: the scripts called here should be converted to native puppet
+define hdp2-ganglia::config::generate_server(
+  $ganglia_service,
+  $role
+)
+{
+  $shell_cmds_dir = $hdp2-ganglia::params::ganglia_shell_cmds_dir
+
+  $cmd = $ganglia_service ? {
+    'gmond'  => $role ? {
+      'server' => "${shell_cmds_dir}/setupGanglia.sh -c ${name} -m",
+       default =>  "${shell_cmds_dir}/setupGanglia.sh -c ${name}"
+    },
+    'gmetad' => "${shell_cmds_dir}/setupGanglia.sh -t",
+     default => hdp_fail("Unexpected ganglia service: ${ganglia_service}")
+  }
+
+  #TODO: put in test condition
+  hdp::exec { $cmd:
+    command => $cmd
+ }
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/hdp-gmetad/service_check.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/hdp-gmetad/service_check.pp
new file mode 100644
index 0000000..5b4e2c4
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/hdp-gmetad/service_check.pp
@@ -0,0 +1,16 @@
+class hdp2-ganglia::hdp-gmetad::service_check() 
+{
+  
+  anchor { 'hdp2-ganglia::hdp-gmetad::service_check::begin':}
+
+  exec { 'hdp-gmetad':
+    command   => "/etc/init.d/hdp-gmetad status | grep -v failed",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before      => Anchor['hdp2-ganglia::hdp-gmetad::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp2-ganglia::hdp-gmetad::service_check::end':}
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/hdp-gmond/service_check.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/hdp-gmond/service_check.pp
new file mode 100644
index 0000000..1a54697
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/hdp-gmond/service_check.pp
@@ -0,0 +1,16 @@
+class hdp2-ganglia::hdp-gmond::service_check() 
+{
+  
+  anchor { 'hdp2-ganglia::hdp-gmond::service_check::begin':}
+
+  exec { 'hdp-gmond':
+    command   => "/etc/init.d/hdp-gmond status | grep -v failed",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before      => Anchor['hdp2-ganglia::hdp-gmond::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp2-ganglia::hdp-gmond::service_check::end':}
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/init.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/init.pp
new file mode 100644
index 0000000..06048e3
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/init.pp
@@ -0,0 +1,17 @@
+class hdp2-ganglia(
+  $service_state
+)
+{
+  if (($service_state != 'no_op') and ($service_state != 'uninstalled')) {
+    include hdp2-ganglia::params
+    $gmetad_user = $hdp2-ganglia::params::gmetad_user
+    $gmond_user = $hdp2-ganglia::params::gmond_user
+  
+    user { $gmond_user : shell => '/bin/bash'} #provision for nobody user
+    if ( $gmetad_user != $gmond_user) {
+      user { $gmetad_user : shell => '/bin/bash'} #provision for nobody user
+    }
+    anchor{'hdp2-ganglia::begin':} -> User<|title == $gmond_user or title == $gmetad_user|> ->  anchor{'hdp2-ganglia::end':}
+  }
+}
+
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/monitor.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/monitor.pp
new file mode 100644
index 0000000..ce65504
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/monitor.pp
@@ -0,0 +1,95 @@
+class hdp2-ganglia::monitor(
+  $service_state = $hdp2::params::cluster_service_state,
+  $ganglia_server_host = undef,
+  $opts = {}
+) inherits hdp2-ganglia::params
+{
+  if  ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {     
+
+    hdp2::package { 'ganglia-monitor':
+      ensure      => 'uninstalled',
+      java_needed => false
+    }
+
+  } else {
+    if ($hdp2::params::service_exists['hdp2-ganglia::server'] != true) {
+      class { 'hdp2-ganglia':
+       service_state => $service_state
+      }
+    }
+
+    hdp2::package { 'ganglia-monitor': }
+
+    if ($hdp2::params::service_exists['hdp2-ganglia::server'] != true) {
+      class { 'hdp2-ganglia::config': ganglia_server_host => $ganglia_server_host}
+    }
+
+#    if (($hdp2::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp2::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp2::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp2::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp2::params::service_exists['hdp-hadoop::client'] == true) or ($hdp2::params::service_exists['hdp-hadoop::snamenode'] == true)) {
+#     class { 'hdp-hadoop::enable-ganglia': }
+#   }
+
+    if ($hdp2::params::service_exists['hdp2-hbase::master'] == true) {
+      class { 'hdp2-hbase::master::enable-ganglia': }
+    }
+  
+    if ($hdp2::params::service_exists['hdp2-hbase::regionserver'] == true) {
+      class { 'hdp2-hbase::regionserver::enable-ganglia': }
+    }
+
+    class { 'hdp2-ganglia::monitor::config-gen': }
+  
+    class { 'hdp2-ganglia::monitor::gmond': ensure => $service_state}
+
+    if ($hdp2::params::service_exists['hdp2-ganglia::server'] != true) {
+      Class['hdp2-ganglia'] -> Hdp2::Package['ganglia-monitor'] -> Class['hdp2-ganglia::config'] -> 
+      Class['hdp2-ganglia::monitor::config-gen'] -> Class['hdp2-ganglia::monitor::gmond']
+    } else {
+      Hdp2::Package['ganglia-monitor'] ->  Class['hdp2-ganglia::monitor::config-gen'] -> Class['hdp2-ganglia::monitor::gmond']
+    }
+  }
+}
+
+
+class hdp2-ganglia::monitor::config-gen()
+{
+
+  $service_exists = $hdp2::params::service_exists
+
+  if ($service_exists['hdp2-hadoop::namenode'] == true) {
+    hdp2-ganglia::config::generate_monitor { 'HDPNameNode':}
+  }
+  if ($service_exists['hdp2-hadoop::resourcemanager'] == true){
+    hdp2-ganglia::config::generate_monitor { 'HDPResourceManager':}
+  }
+  if ($service_exists['hdp2-hbase::master'] == true) {
+    hdp2-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
+  }
+  if ($service_exists['hdp2-hadoop::datanode'] == true) {
+    hdp2-ganglia::config::generate_monitor { 'HDPSlaves':}
+  }
+  Hdp2-ganglia::Config::Generate_monitor<||>{
+    ganglia_service => 'gmond',
+    role => 'monitor'
+  }
+   # 
+  anchor{'hdp2-ganglia::monitor::config-gen::begin':} -> Hdp2-ganglia::Config::Generate_monitor<||> -> anchor{'hdp2-ganglia::monitor::config-gen::end':}
+}
+
+class hdp2-ganglia::monitor::gmond(
+  $ensure
+  )
+{
+  if ($ensure == 'running') {
+    $command = "service hdp-gmond start >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
+   } elsif  ($ensure == 'stopped') {
+    $command = "service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"
+  }
+  if ($ensure == 'running' or $ensure == 'stopped') {
+    hdp::exec { "hdp-gmond service" :
+      command => "$command",
+      unless => "/bin/ps auwx | /bin/grep [g]mond",
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    }
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/monitor_and_server.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/monitor_and_server.pp
new file mode 100644
index 0000000..b50feb0
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/monitor_and_server.pp
@@ -0,0 +1,70 @@
+class hdp2-ganglia::monitor_and_server(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-ganglia::params
+{
+  $ganglia_shell_cmds_dir = $hdp2-ganglia::params::ganglia_shell_cmds_dir
+  $ganglia_conf_dir = $hdp2-ganglia::params::ganglia_conf_dir
+  $ganglia_runtime_dir = $hdp2-ganglia::params::ganglia_runtime_dir
+
+  #note: includes the common package ganglia-monitor
+  class { 'hdp2-ganglia':
+    service_state => $service_state
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['uninstalled']) {
+    class { 'hdp2-ganglia::server::packages':
+      ensure => 'uninstalled'
+      }
+
+    hdp2::directory { [$ganglia_conf_dir,$ganglia_runtime_dir]:
+      service_state => $service_state,
+      force => true
+    }
+    
+    class { 'hdp2-ganglia::config':
+      service_state => $service_state
+    }
+
+    Class['hdp2-ganglia'] -> Class['hdp2-ganglia::server::packages'] -> 
+      Hdp2::Directory[$ganglia_conf_dir] -> Hdp2::Directory[$ganglia_runtime_dir] ->
+      Class['hdp2-ganglia::config']
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    class { 'hdp2-ganglia::server::packages': }
+
+    class { 'hdp2-ganglia::config': 
+     ganglia_server_host => $hdp2::params::host_address,
+     service_state       => $service_state
+     }
+
+    class {'hdp2-ganglia::monitor::config-gen': }      
+
+    class {'hdp2-ganglia::server::config-gen': }      
+    
+    hdp2-ganglia::config::generate_server { 'gmetad':
+      ganglia_service => 'gmetad'
+    }
+
+    class { 'hdp2-ganglia::service::gmond': 
+      ensure => $service_state
+    }
+
+    class { 'hdp2-ganglia::server::services' : 
+      service_state => $service_state,
+      monitor_and_server_single_node => true
+    }
+
+    class { 'hdp2-ganglia::service::change_permission':
+      ensure => $service_state
+    }
+
+    #top level no anchors needed
+    Class['hdp2-ganglia'] -> Class['hdp2-ganglia::server::packages'] -> Class['hdp2-ganglia::config'] -> 
+      Class['hdp2-ganglia::monitor::config-gen'] -> Class['hdp2-ganglia::server::config-gen'] -> Hdp2-ganglia::Config::Generate_server['gmetad'] ->
+      Class['hdp2-ganglia::service::gmond'] -> Class['hdp2-ganglia::server::services'] ->
+      Class['hdp2-ganglia::service::change_permission']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/params.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/params.pp
new file mode 100644
index 0000000..ad51181
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/params.pp
@@ -0,0 +1,12 @@
+class hdp2-ganglia::params() inherits hdp2::params
+{
+  $ganglia_conf_dir = "/etc/ganglia/hdp"
+  $ganglia_runtime_dir = "/var/run/ganglia/hdp"
+
+  $ganglia_shell_cmds_dir = hdp_default("ganglia_shell_cmd_dir","/usr/libexec/hdp/ganglia")
+  
+  $gmetad_user = $hdp2::params::gmetad_user
+  $gmond_user = $hdp2::params::gmond_user
+
+  $webserver_group = hdp_default("hadoop/gangliaEnv/webserver_group","apache")
+}
diff --git a/hmc/puppet/modules/hdp2-ganglia/manifests/server.pp b/hmc/puppet/modules/hdp2-ganglia/manifests/server.pp
new file mode 100644
index 0000000..05d54a7
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/manifests/server.pp
@@ -0,0 +1,84 @@
+class hdp2-ganglia::server(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp2-ganglia::params
+{
+  $hdp2::params::service_exists['hdp2-ganglia::server'] = true
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {
+
+   class { 'hdp2-ganglia::server::packages':
+      ensure => 'uninstalled'
+   }
+
+  } else {
+  class { 'hdp2-ganglia':
+    service_state => $service_state
+  }
+
+  class { 'hdp2-ganglia::server::packages': }
+
+  class { 'hdp2-ganglia::config': 
+    ganglia_server_host => $hdp2::params::host_address,
+    service_state       => $service_state 
+  }
+
+  hdp2-ganglia::config::generate_server { ['HDPNameNode','HDPResourceManager','HDPHBaseMaster','HDPSlaves']:
+    ganglia_service => 'gmond',
+    role => 'server'
+  }
+  hdp2-ganglia::config::generate_server { 'gmetad':
+    ganglia_service => 'gmetad',
+    role => 'server'
+  }
+
+  class { 'hdp2-ganglia::server::gmetad': ensure => $service_state}
+
+  class { 'hdp2-ganglia::service::change_permission': ensure => $service_state }
+
+  #top level does not need anchors
+  Class['hdp2-ganglia'] -> Class['hdp2-ganglia::server::packages'] -> Class['hdp2-ganglia::config'] -> 
+    Hdp2-ganglia::Config::Generate_server<||> -> Class['hdp2-ganglia::server::gmetad'] -> Class['hdp2-ganglia::service::change_permission']
+ }
+}
+
+class hdp2-ganglia::server::packages(
+  $ensure = present 
+)
+{
+  hdp2::package { ['ganglia-server','ganglia-gweb','ganglia-hdp-gweb-addons']: 
+    ensure      => $ensure,
+    java_needed => false  
+  } 
+}
+
+
+class hdp2-ganglia::service::change_permission(
+  $ensure
+)
+{
+  if ($ensure == 'running' or $ensure == 'installed_and_configured') {
+    hdp2::directory_recursive_create { '/var/lib/ganglia/dwoo' :
+      mode => '0777'
+      }
+  }
+}
+
+class hdp2-ganglia::server::gmetad(
+  $ensure
+)
+{
+  if ($ensure == 'running') {
+    $command = "service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+   } elsif  ($ensure == 'stopped') {
+    $command = "service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  }
+  if ($ensure == 'running' or $ensure == 'stopped') {
+    hdp::exec { "hdp-gmetad service" :
+      command => "$command",
+      unless => "/bin/ps auwx | /bin/grep [g]metad",
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    }
+  }
+}
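
On the server side the same setupGanglia.sh helper is driven with the extra flags from the selector in generate_server.pp; a sketch assuming the default shell commands directory of /usr/libexec/hdp/ganglia:

  hdp2-ganglia::config::generate_server { 'HDPNameNode':
    ganglia_service => 'gmond',
    role            => 'server'
  }
  # role 'server' adds the -m flag:
  #   /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m

  hdp2-ganglia::config::generate_server { 'gmetad':
    ganglia_service => 'gmetad',
    role            => 'server'
  }
  #   /usr/libexec/hdp/ganglia/setupGanglia.sh -t
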
diff --git a/hmc/puppet/modules/hdp2-ganglia/templates/gangliaClusters.conf.erb b/hmc/puppet/modules/hdp2-ganglia/templates/gangliaClusters.conf.erb
new file mode 100644
index 0000000..5bcb326
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/templates/gangliaClusters.conf.erb
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+    HDPSlaves           <%=scope.function_hdp_host("ganglia_server_host")%>  8660
+    HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
+    HDPResourceManager  <%=scope.function_hdp_host("ganglia_server_host")%>  8662
+    HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663
diff --git a/hmc/puppet/modules/hdp2-ganglia/templates/gangliaEnv.sh.erb b/hmc/puppet/modules/hdp2-ganglia/templates/gangliaEnv.sh.erb
new file mode 100644
index 0000000..4be541d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/templates/gangliaEnv.sh.erb
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER=<%=scope.function_hdp_template_var("gmetad_user")%>;
+GMOND_USER=<%=scope.function_hdp_template_var("gmond_user")%>;
+WEBSERVER_GROUP=<%=scope.function_hdp_template_var("webserver_group")%>;
diff --git a/hmc/puppet/modules/hdp2-ganglia/templates/gangliaLib.sh.erb b/hmc/puppet/modules/hdp2-ganglia/templates/gangliaLib.sh.erb
new file mode 100644
index 0000000..2246789
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-ganglia/templates/gangliaLib.sh.erb
@@ -0,0 +1,61 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR=<%=scope.function_hdp_template_var("ganglia_conf_dir")%>;
+GANGLIA_RUNTIME_DIR=<%=scope.function_hdp_template_var("ganglia_runtime_dir")%>;
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/files/checkForFormat.sh b/hmc/puppet/modules/hdp2-hadoop/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/.directory b/hmc/puppet/modules/hdp2-hadoop/manifests/.directory
new file mode 100644
index 0000000..f30512d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/.directory
@@ -0,0 +1,3 @@
+[Dolphin]
+Timestamp=2011,3,16,9,26,14
+ViewMode=1
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/client.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/client.pp
new file mode 100644
index 0000000..a1b858b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/client.pp
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::client(
+  $service_state = $hdp2::params::cluster_client_state
+) inherits hdp2::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::client'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp2::params::use_32_bits_on_slaves == true) {
+    Hdp2-hadoop::Package<||>{include_32_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp2-hadoop::Package<||>{include_64_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/datanode.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/datanode.pp
new file mode 100644
index 0000000..8ac58e6
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/datanode.pp
@@ -0,0 +1,88 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::datanode(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params 
+{
+
+  $hdp2::params::service_exists['hdp2-hadoop::datanode'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp2::params::use_32_bits_on_slaves == true) {
+    Hdp2-hadoop::Package<||>{include_32_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp2-hadoop::Package<||>{include_64_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_data_dir = $hdp2-hadoop::params::dfs_data_dir
+  
+    if (($hdp2::params::service_exists['hdp2-hadoop::namenode'] == true) or ($hdp2::params::service_exists['hdp2-hadoop::snamenode'] == true)){
+      $a_namenode_on_node = true
+    } else {
+      $a_namenode_on_node = false
+    }
+
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+  
+    hdp2-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
+      service_state => $service_state
+    }
+
+    if ($a_namenode_on_node == true){
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp2-hadoop::service{ 'datanode':
+      ensure         => $service_state,
+      user           => $hdp2-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+    
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['datanode']
+    Hdp2-hadoop::Datanode::Create_data_dirs<||> -> Hdp2-hadoop::Service['datanode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::datanode::create_data_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp2::directory_recursive_create { $dirs :
+    owner => $hdp2-hadoop::params::hdfs_user,
+    mode => '0750',
+    service_state => $service_state,
+    force => true
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp
new file mode 100644
index 0000000..059483e
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp
@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2-hadoop::hdfs::copyfromlocal(
+  $service_state,
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false,
+  $dest_dir = undef 
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
+    hdp2-hadoop::exec-hadoop { $copy_cmd:
+      command => $copy_cmd,
+      unless => "hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+     } 
+    }  
+ 
+    if (chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
+      }
+      hdp2-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$copy_cmd] -> Hdp2-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
+      }
+      hdp2-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$copy_cmd] -> Hdp2-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}
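
A minimal usage sketch of this define (the destination directory, owner and mode below are placeholders, not values taken from this module):

  hdp2-hadoop::hdfs::copyfromlocal { '/etc/passwd':
    service_state => 'running',
    owner         => 'hdfs',
    mode          => '644',
    dest_dir      => '/tmp/passwd-copy'
  }
  # Wraps "fs -copyFromLocal /etc/passwd /tmp/passwd-copy" in
  # hdp2-hadoop::exec-hadoop (guarded by an "fs -ls" test on the destination),
  # then issues "fs -chown hdfs /tmp/passwd-copy" and
  # "fs -chmod 644 /tmp/passwd-copy".
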
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp
new file mode 100644
index 0000000..eba6c2a
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp
@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: unset should be changed to undef, just to be consistent
+define hdp2-hadoop::hdfs::directory(
+  $service_state = 'running',
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $mkdir_cmd = "fs -mkdir -p ${name}"
+    hdp2-hadoop::exec-hadoop { $mkdir_cmd:
+      command => $mkdir_cmd,
+      unless => "hadoop fs -ls ${name} >/dev/null 2>&1"
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+     } 
+    }  
+ 
+    if (chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${name}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${name}"
+      }
+      hdp2-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp2-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${name}"
+      }
+      hdp2-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp2-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}
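
The hdfs-directories class in service_check.pp below uses this define to provision the smoke-test user's home directory; a standalone sketch with a placeholder user name:

  hdp2-hadoop::hdfs::directory { '/user/ambari_qa':
    service_state   => 'running',
    owner           => 'ambari_qa',
    mode            => '770',
    recursive_chmod => true
  }
  # Issues "fs -mkdir -p /user/ambari_qa" (skipped if an "fs -ls" test
  # succeeds), then "fs -chown ambari_qa /user/ambari_qa" and
  # "fs -chmod -R 770 /user/ambari_qa", all through hdp2-hadoop::exec-hadoop.
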
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp
new file mode 100644
index 0000000..b54b9a8
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp
@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::hdfs::service_check()
+{
+  $unique = hdp_unique_id_and_date()
+  $dir = '/tmp'
+  $tmp_file = "${dir}/${unique}"
+  $user_dir='/user'
+  $apps_dir='/apps'
+
+  $safemode_command = "dfsadmin -safemode get | grep OFF"
+
+  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir} ; hadoop fs -mkdir ${user_dir} ; hadoop fs -chmod 755 ${user_dir} ; hadoop fs -mkdir ${apps_dir} ; hadoop fs -chmod 755 ${apps_dir} ;"
+  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up fact that test needs explicit hadoop while omamnd does not
+  $cleanup_cmd = "fs -rm ${tmp_file}"
+  #cleanup put below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
+  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that second comamnd needs hadoop
+  $test_cmd = "fs -test -e ${tmp_file}"
+
+  anchor { 'hdp2-hadoop::hdfs::service_check::begin':}
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
+    command   => $safemode_command,
+    tries     => 40,
+    try_sleep => 15,
+    logoutput => true,
+    require   => Anchor['hdp2-hadoop::hdfs::service_check::begin']
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
+    command   => $create_dir_cmd,
+    unless    => $test_dir_exists,
+    tries     => 3,
+    try_sleep => 5,
+    require   => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 3,
+    try_sleep => 5,
+    require   => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
+    notify    => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::test']
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
+    #notify      => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::cleanup']  #TODO: put in after testing
+    before      => Anchor['hdp2-hadoop::hdfs::service_check::end'] #TODO: remove after testing
+  }
+
+   #TODO: put in after testing
+ #  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
+ #   command     => $cleanup_cmd,
+ #   refreshonly => true,
+ #   require     => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::test'],
+ #   before      => Anchor['hdp2-hadoop::hdfs::service_check::end']
+  #}
+  anchor{ 'hdp2-hadoop::hdfs::service_check::end':}
+
+  class { 'hdp2-hadoop::hdfs-directories' :
+    service_state => running  }
+}
+
+class hdp2-hadoop::hdfs-directories($service_state)
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+  hdp2-hadoop::hdfs::directory{ "/user/${smoke_test_user}":
+    service_state => $service_state,
+    owner => $smoke_test_user,
+    mode  => '770',
+    recursive_chmod => true
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp
new file mode 100644
index 0000000..3195934
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp
@@ -0,0 +1,194 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton for use with <||> form so that namenode, datanode, etc can pass state to hdp2-hadoop and still use include
+define hdp2-hadoop::common(
+  $service_states = []
+)
+{
+  class { 'hdp2-hadoop':
+    service_states => $service_states    
+  }
+  anchor{'hdp2-hadoop::common::begin':} -> Class['hdp2-hadoop'] -> anchor{'hdp2-hadoop::common::end':} 
+}
+
+class hdp2-hadoop::initialize()
+{
+  if ($hdp2::params::component_exists['hdp2-hadoop'] == true) {
+  } else {
+    $hdp2::params::component_exists['hdp2-hadoop'] = true
+  }
+  hdp2-hadoop::common { 'common':}
+  anchor{'hdp2-hadoop::initialize::begin':} -> Hdp2-hadoop::Common['common'] -> anchor{'hdp2-hadoop::initialize::end':}
+}
+
+class hdp2-hadoop(
+  $service_states  = []
+)
+{
+  include hdp2-hadoop::params
+  $hadoop_config_dir = $hdp2-hadoop::params::conf_dir
+  $hdfs_user = $hdp2-hadoop::params::hdfs_user  
+  $yarn_user = $hdp2-hadoop::params::yarn_user  
+  $mapred_user = $hdp2-hadoop::params::mapred_user  
+
+  anchor{'hdp2-hadoop::begin':} 
+  anchor{'hdp2-hadoop::end':} 
+
+  if ('uninstalled' in $service_states) {
+    hdp2-hadoop::package { 'hadoop':
+      ensure => 'uninstalled'
+    }
+
+    hdp2::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp2-hadoop::begin'] -> Hdp2-hadoop::Package<||> -> Hdp2::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp2-hadoop::end']
+  } else {
+    
+    hdp2-hadoop::package { 'hadoop':}
+
+
+    hdp2::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+ 
+    hdp2::user { $hdfs_user:}
+    hdp2::user { $yarn_user:}
+    hdp2::user { $mapred_user:}
+
+    $logdirprefix = $hdp2-hadoop::params::hadoop_logdirprefix
+    hdp2::directory_recursive_create { $logdirprefix: 
+        owner => 'root'
+    }
+    $piddirprefix = $hdp2-hadoop::params::hadoop_piddirprefix
+    hdp2::directory_recursive_create { $piddirprefix: 
+        owner => 'root'
+    }
+ 
+    $common_hdfs_template_files = ['hadoop-env.sh','core-site.xml','hdfs-site.xml','hadoop-policy.xml','health_check','hadoop-metrics2.properties','commons-logging.properties','log4j.properties','slaves']
+    hdp2-hadoop::configfile { $common_hdfs_template_files:
+      tag   => 'common', 
+      owner => $hdfs_user
+    }
+    
+    $yarn_template_files = ['yarn-site.xml','yarn-env.sh','container-executor.cfg','capacity-scheduler.xml']
+    hdp2-hadoop::configfile { $yarn_template_files: 
+      tag => 'common', 
+      owner => $yarn_user
+    }
+
+    hdp2-hadoop::configfile { 'mapred-site.xml': 
+      tag => 'common', 
+      owner => $mapred_user
+    }
+
+    Anchor['hdp2-hadoop::begin'] -> Hdp2-hadoop::Package<||> ->  Hdp2::Directory_recursive_create[$hadoop_config_dir] ->  Hdp2::User<|title == $hdfs_user or title == $yarn_user or title == $mapred_user|> 
+    -> Hdp2-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp2-hadoop::end']
+    Anchor['hdp2-hadoop::begin'] -> Hdp2::Directory_recursive_create[$logdirprefix] -> Anchor['hdp2-hadoop::end']
+    Anchor['hdp2-hadoop::begin'] -> Hdp2::Directory_recursive_create[$piddirprefix] -> Anchor['hdp2-hadoop::end']
+  }
+}
+
+class hdp2-hadoop::enable-ganglia()
+{
+  Hdp2-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
+}
+
+###config file helper
+define hdp2-hadoop::configfile(
+  $owner = undef,
+  $hadoop_conf_dir = $hdp2-hadoop::params::conf_dir,
+  $mode = undef,
+  $namenode_host = undef,
+  $yarn_rm_host = undef,
+  $snamenode_host = undef,
+  $template_tag = undef,
+  $size = undef, #TODO: deprecate
+  $sizes = []
+) 
+{
+  #TODO: may need to be fixed 
+  if ($yarn_rm_host == undef) {
+    $calc_yarn_rm_host = $namenode_host
+  } else {
+    $calc_yarn_rm_host = $yarn_rm_host 
+  }
+ 
+  #only set 32 if there is a 32-bit component and no 64-bit components
+  if (64 in $sizes) {
+    $common_size = 64
+  } elsif (32 in $sizes) {
+    $common_size = 32
+  } else {
+    $common_size = 64
+  }
+  
+  hdp2::configfile { "${hadoop_conf_dir}/${name}":
+    component      => 'hadoop',
+    owner          => $owner,
+    mode           => $mode,
+    namenode_host  => $namenode_host,
+    snamenode_host => $snamenode_host,
+    yarn_rm_host   => $calc_yarn_rm_host,
+    template_tag   => $template_tag,
+    size           => $common_size
+  }
+}
+
+#####
+define hdp2-hadoop::exec-hadoop(
+  $command,
+  $unless = undef,
+  $refreshonly = undef,
+  $echo_yes = false,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $user = undef,
+  $logoutput = undef
+)
+{
+  include hdp2-hadoop::params
+  $conf_dir = $hdp2-hadoop::params::conf_dir
+  if ($echo_yes == true) {
+    $cmd = "yes Y | hadoop --config ${conf_dir} ${command}"
+  } else {
+    $cmd = "hadoop --config ${conf_dir} ${command}"     
+  }
+  if ($user == undef) {
+   $run_user = $hdp2-hadoop::params::hdfs_user
+  } else {
+    $run_user = $user
+  }
+  hdp2::exec { $cmd:
+    command     => $cmd,
+    user        => $run_user,
+    unless      => $unless,
+    refreshonly => $refreshonly,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput
+  }
+}
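
The service-check classes below are built on this exec-hadoop wrapper; a minimal sketch of a standalone call (the resource title is illustrative):

  hdp2-hadoop::exec-hadoop { 'example::list_root':
    command   => 'dfs -ls /',
    tries     => 3,
    try_sleep => 5
  }
  # Expands to an hdp2::exec that runs
  # "hadoop --config <conf_dir> dfs -ls /" as $hdfs_user, retrying up to
  # three times with a five-second pause between attempts.
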
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp
new file mode 100644
index 0000000..fff4cf8
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp
@@ -0,0 +1,75 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::mapred::service_check() 
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+  $mapred_examples_jar = $hdp2::params::mapred_examples_jar
+  $input_file = 'mapredsmokeinput'
+  $output_file = "mapredsmokeoutput"
+
+  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
+  #cleanup put below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
+  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that second comamnd needs hadoop
+  $test_cmd = "fs -test -e ${output_file}" 
+  $run_wordcount_job = "jar ${mapred_examples_jar}  wordcount ${input_file} ${output_file}"
+  
+  anchor { 'hdp2-hadoop::mapred::service_check::begin':}
+
+  hdp2-hadoop::exec-hadoop { 'mapred::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Anchor['hdp2-hadoop::mapred::service_check::begin'],
+  #  notify    => Hdp2-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    user      => $smoke_test_user
+  }
+
+  hdp2-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
+    command   => $run_wordcount_job,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Hdp2-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+    notify    => Hdp2-hadoop::Exec-hadoop['mapred::service_check::test'],
+    user      => $smoke_test_user,
+    logoutput => "true"
+  }
+
+#  exec { 'runjob':
+#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
+#    tries     => 1,
+#    try_sleep => 5,
+#    require   => Hdp2-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+#    notify    => Hdp2-hadoop::Exec-hadoop['mapred::service_check::test'],
+#    logoutput => "true",
+#    user      => $smoke_test_user
+#  }
+
+  hdp2-hadoop::exec-hadoop { 'mapred::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Hdp2-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    before      => Anchor['hdp2-hadoop::mapred::service_check::end'], #TODO: remove after testing
+    user        => $smoke_test_user
+  }
+  
+  anchor{ 'hdp2-hadoop::mapred::service_check::end':}
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp
new file mode 100644
index 0000000..19f8056
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp
@@ -0,0 +1,78 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::namenode(
+  $service_state = $hdp2::params::cluster_service_state,
+  $slave_hosts = [],
+  $format = true,
+  $opts = {}
+) inherits hdp2-hadoop::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::namenode'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+  Hdp2-hadoop::Package<||>{include_64_bit => true}
+  Hdp2-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_name_dir = $hdp2-hadoop::params::dfs_name_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+ 
+    hdp2-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
+      service_state => $service_state
+    }
+   
+    Hdp2-Hadoop::Configfile<||>{namenode_host => $hdp2::params::host_address}
+    Hdp2::Configfile<||>{namenode_host => $hdp2::params::host_address} #for components other than hadoop (e.g., hbase) 
+  
+    if ($service_state == 'running' and $format == true) {
+      class {'hdp2-hadoop::namenode::format' : }
+    }
+
+    hdp2-hadoop::service{ 'namenode':
+      ensure       => $service_state,
+      user         => $hdp2-hadoop::params::hdfs_user,
+      initial_wait => hdp_option_value($opts,'wait')
+    }
+    #top level does not need anchors
+    Class['hdp2-hadoop'] ->  Hdp2-hadoop::Service['namenode']
+    Hdp2-hadoop::Namenode::Create_name_dirs<||> -> Hdp2-hadoop::Service['namenode']
+    if ($service_state == 'running' and $format == true) {
+      Class['hdp2-hadoop'] -> Class['hdp2-hadoop::namenode::format'] -> Hdp2-hadoop::Service['namenode']
+      Hdp2-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp2-hadoop::namenode::format']
+    } 
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::namenode::create_name_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp2::directory_recursive_create { $dirs :
+    owner => $hdp2-hadoop::params::hdfs_user,
+    mode => '0755',
+    service_state => $service_state,
+    force => true
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp
new file mode 100644
index 0000000..aacca09
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp
@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::namenode::format(
+  $force = false
+)
+{
+  $mark_dir = $hdp2-hadoop::params::namenode_formatted_mark_dir
+  $dfs_name_dir = $hdp2-hadoop::params::dfs_name_dir
+  $hdfs_user = $hdp2::params::hdfs_user
+  $hadoop_conf_dir = $hdp2-hadoop::params::conf_dir
+
+  if ($force == true) {
+      hdp2-hadoop::exec-hadoop { 'namenode -format' :
+      command => 'namenode -format',
+      notify  => Hdp2::Exec['set namenode mark']
+    }
+  } else {
+      file { '/tmp/checkForFormat.sh':
+      ensure => present,
+      source => "puppet:///modules/hdp2-hadoop/checkForFormat.sh",
+      mode => '0755'
+    }
+
+    exec { '/tmp/checkForFormat.sh':
+      command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
+      unless   => "test -d ${mark_dir}",
+      require   => File['/tmp/checkForFormat.sh'],
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true",
+      notify   => Hdp2::Exec['set namenode mark']
+    }
+  }
+
+  hdp2::exec { 'set namenode mark' :
+    command     => "mkdir -p ${mark_dir}",
+    refreshonly => true
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp
new file mode 100644
index 0000000..1168027
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp
@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::namenode::service_check()
+{
+  hdp2-hadoop::exec-hadoop { 'namenode::service_check':
+    command   => 'dfs -ls /',
+    tries     => 3,
+    try_sleep => 5
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp
new file mode 100644
index 0000000..f4dff18
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp
@@ -0,0 +1,63 @@
+class hdp2-hadoop::nodemanager(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::nodemanager'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp2::params::use_32_bits_on_slaves == true) {
+    Hdp2-hadoop::Package<||>{include_32_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp2-hadoop::Package<||>{include_64_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $yarn_nm_local_dirs = $hdp2-hadoop::params::yarn_nm_local_dirs
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+  
+    hdp2-hadoop::nodemanager::create_local_dirs { $yarn_nm_local_dirs: 
+      service_state => $service_state
+    }
+    
+    if ($hdp2::params::service_exists['hdp2-hadoop::resourcemanager'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+
+    hdp2-hadoop::service{ 'nodemanager':
+      ensure => $service_state,
+      user   => $hdp2-hadoop::params::yarn_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['nodemanager']
+    Hdp2-hadoop::Nodemanager::Create_local_dirs<||> -> Hdp2-hadoop::Service['nodemanager']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::nodemanager::create_local_dirs($service_state)
+{
+  if ($hdp2::params::service_exists['hdp2-hadoop::resourcemanager'] != true) {
+    $dirs = hdp_array_from_comma_list($name)
+    hdp2::directory_recursive_create { $dirs :
+      owner => $hdp2-hadoop::params::yarn_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp
new file mode 100644
index 0000000..6158c5a
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton, but using define so can use collections to override params
+define hdp2-hadoop::package(
+  $ensure = 'present',
+  $include_32_bit = false,
+  $include_64_bit = false
+)
+{
+  #just use 32-bit if it's specifically requested and there is no 64-bit request
+  if ($include_32_bit == true) and ($include_64_bit != true) {
+    $size = 32
+  } else  {
+    $size = 64
+  }
+  $package = "hadoop ${size}"
+  $mapreduce_lzo_enabled = $hdp2::params::mapreduce_lzo_enabled
+
+  hdp2::package{ $package:
+    ensure       => $ensure,
+    package_type => 'hadoop',
+    size         => $size,
+    lzo_needed   => $mapreduce_lzo_enabled
+  }
+  anchor{ 'hdp2-hadoop::package::helper::begin': } -> Hdp2::Package[$package] -> anchor{ 'hdp2-hadoop::package::helper::end': }
+}
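
Editor's sketch (not part of the patch): the comment above refers to the collector-override pattern that nodemanager.pp and resourcemanager.pp in this patch already use. A minimal illustration follows; the resource title 'hadoop' is an assumption about where the singleton is declared (presumably in the module's common manifest).

# Sketch of the collector-override pattern: declare the package once,
# then amend it through a collector instead of redeclaring it.
hdp2-hadoop::package { 'hadoop': }                 # assumed singleton declaration
Hdp2-hadoop::Package<||>{include_64_bit => true}   # e.g. from a master-component class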
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp
new file mode 100644
index 0000000..44b95b2
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp
@@ -0,0 +1,178 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::params(
+) inherits hdp2::params 
+{
+
+  ##TODO: for testing in masterless mode
+  $use_preconditions = false
+  ####  
+  $conf_dir = $hdp2::params::hadoop_conf_dir 
+
+  ####### users
+
+  $hdfs_user = $hdp2::params::hdfs_user
+  $yarn_user = $hdp2::params::yarn_user
+  $mapred_user = $hdp2::params::mapred_user
+  
+  ### hadoop-env
+  
+  $dtnode_heapsize = hdp_default("hadoop/hadoop-env/dtnode_heapsize","1024m")
+
+  $hadoop_heapsize = hdp_default("hadoop/hadoop-env/hadoop_heapsize","1024m")
+
+  $hadoop_logdirprefix = hdp_default("hadoop/hadoop-env/hadoop_logdirprefix","/var/log/hadoop")
+
+  $hadoop_piddirprefix = hdp_default("hadoop/hadoop-env/hadoop_piddirprefix","/var/run/hadoop")
+  $run_dir = $hadoop_piddirprefix
+  
+  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
+  
+  $yarn_heapsize = hdp_default("hadoop/yarn-env/yarn_heapsize","1024")
+
+  $rm_heapsize = hdp_default("hadoop/yarn-env/rm_heapsize","1024")
+
+  $rm_opt_maxnewsize = hdp_default("hadoop/yarn-env/rm_opt_maxnewsize","200m")
+
+  $rm_opt_newsize = hdp_default("hadoop/yarn-env/rm_opt_newsize","200m")
+
+  $namenode_heapsize = hdp_default("hadoop/hadoop-env/namenode_heapsize","1024m")
+
+  $namenode_opt_maxnewsize = hdp_default("hadoop/hadoop-env/namenode_opt_maxnewsize","640m")
+
+  $namenode_opt_newsize = hdp_default("hadoop/hadoop-env/namenode_opt_newsize","640m")
+  
+  ### compression related
+  if (($hdp2::params::mapreduce_lzo_enabled == true) and ($hdp2::params::mapreduce_snappy_enabled == true)) {
+    $mapred_compress_map_output = true
+    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp2::params::mapreduce_snappy_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp2::params::mapreduce_lzo_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
+    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
+  } else { 
+    $mapred_compress_map_output = false
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
+  }
+
+  ### core-site
+  $enable_security_authorization = hdp_default("hadoop/core-site/enable_security_authorization","false")
+
+  $fs_checkpoint_dir = hdp_default("hadoop/core-site/fs_checkpoint_dir","/tmp/dfs/namesecondary")
+
+  $proxyuser_group = hdp_default("hadoop/core-site/proxyuser_group","users")
+
+  $security_type = hdp_default("hadoop/core-site/security_type","simple")
+  
+  ### hdfs-site
+  $datanode_du_reserved = hdp_default("hadoop/hdfs-site/datanode_du_reserved",1073741824)
+
+  $dfs_block_local_path_access_user = hdp_default("hadoop/hdfs-site/dfs_block_local_path_access_user","hbase")
+
+  $dfs_data_dir = $hdp2::params::dfs_data_dir
+
+  $dfs_datanode_address = hdp_default("hadoop/hdfs-site/dfs_datanode_address",50010)
+
+  $dfs_datanode_data_dir_perm = hdp_default("hadoop/hdfs-site/dfs_datanode_data_dir_perm",750)
+
+  $dfs_datanode_failed_volume_tolerated = hdp_default("hadoop/hdfs-site/dfs_datanode_failed_volume_tolerated",0)
+
+  $dfs_datanode_http_address = hdp_default("hadoop/hdfs-site/dfs_datanode_http_address",50075)
+
+  $dfs_exclude = hdp_default("hadoop/hdfs-site/dfs_exclude","dfs.exclude")
+
+  $dfs_include = hdp_default("hadoop/hdfs-site/dfs_include","dfs.include")
+  
+  $dfs_name_dir = hdp_default("hadoop/hdfs-site/dfs_name_dir","/tmp/hadoop-hdfs/dfs/name")
+  
+  $dfs_replication = hdp_default("hadoop/hdfs-site/dfs_replication",3)
+
+  $dfs_support_append = hdp_default("hadoop/hdfs-site/dfs_support_append",true)
+
+  $dfs_webhdfs_enabled = hdp_default("hadoop/hdfs-site/dfs_webhdfs_enabled","false")
+
+
+ ######### yarn #########
+
+   ### yarn-site
+  $yarn_nm_local_dirs = hdp_default("hadoop/yarn-site/yarn_nm_local_dirs","/tmp/hadoop-yarn/nm/local")
+
+  $yarn_nm_log_dirs = hdp_default("hadoop/yarn-site/yarn_nm_log_dirs", "/tmp/hadoop-yarn/nm/logs")
+
+  $scheduler_name = hdp_default("hadoop/yarn-site/scheduler_name","org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler")
+
+  $container_executor = hdp_default("hadoop/yarn-site/container_executor","org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor")
+
+ ######### mapred #######
+   ### mapred-site
+
+  $mapred_system_dir = '/mapred/system'
+
+  $mapreduce_io_sort_mb = hdp_default("hadoop/mapred-site/mapreduce_io_sort_mb","200")
+
+  $io_sort_spill_percent = hdp_default("hadoop/mapred-site/io_sort_spill_percent","0.9")
+
+  $mapred_map_child_java_opts_sz = hdp_default("hadoop/mapred-site/mapred_map_child_java_opts_sz","-Xmx1024m")
+  
+  $mapred_reduce_child_java_opts_sz = hdp_default("hadoop/mapred-site/mapred_reduce_child_java_opts_sz","-Xmx2048m")
+
+  $mapred_cluster_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_map_mem_mb","-1")
+
+  $mapred_cluster_max_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_map_mem_mb","-1")
+
+  $mapred_cluster_max_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_red_mem_mb","-1")
+
+  $mapred_cluster_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_red_mem_mb","-1")
+
+  $mapred_hosts_exclude = hdp_default("hadoop/mapred-site/mapred_hosts_exclude","mapred.exclude")
+
+  $mapred_hosts_include = hdp_default("hadoop/mapred-site/mapred_hosts_include","mapred.include")
+
+  $mapred_job_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_map_mem_mb","1024")
+
+  $mapred_job_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_red_mem_mb","2048")
+
+  $mapred_jobstatus_dir = hdp_default("hadoop/mapred-site/mapred_jobstatus_dir","file:////mapred/jobstatus")
+
+  $mapred_local_dir = hdp_default("hadoop/mapred-site/mapred_local_dir","/tmp/hadoop-mapred/mapred/local")
+   
+  $mapred_map_tasks_max = hdp_default("hadoop/mapred-site/mapred_map_tasks_max",4)
+
+  $mapred_red_tasks_max = hdp_default("hadoop/mapred-site/mapred_red_tasks_max",4)
+
+  $mapreduce_userlog_retainhours = hdp_default("hadoop/mapred-site/mapreduce_userlog_retainhours",24)
+
+  $maxtasks_per_job = hdp_default("hadoop/mapred-site/maxtasks_per_job","-1")
+
+  $task_controller = hdp_default("hadoop/mapred-site/task_controller","org.apache.hadoop.mapred.DefaultTaskController")
+
+  #### health_check
+
+  $security_enabled = hdp_default("hadoop/health_check/security_enabled","false")
+
+  $task_bin_exe = hdp_default("hadoop/health_check/task_bin_exe")
+}
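
Editor's sketch (not part of the patch): other manifests in this module consume these values by fully qualified name rather than redefining them, as resourcemanager.pp and nodemanager.pp below do. A short, hypothetical example follows; the class name 'example' is made up.

# Sketch: reading params from another class in this module.
class hdp2-hadoop::example inherits hdp2-hadoop::params {
  $user     = $hdp2-hadoop::params::yarn_user
  $conf_dir = $hdp2-hadoop::params::conf_dir
  notify { "yarn user ${user}, conf dir ${conf_dir}": }
}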
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp
new file mode 100644
index 0000000..b1fa7da
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp
@@ -0,0 +1,100 @@
+class hdp2-hadoop::resourcemanager(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::resourcemanager'] = true
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+  Hdp2-hadoop::Package<||>{include_64_bit => true}
+  Hdp2-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $yarn_user = $hdp2-hadoop::params::yarn_user
+    $mapred_user = $hdp2-hadoop::params::mapred_user
+    $yarn_nm_local_dirs = $hdp2-hadoop::params::yarn_nm_local_dirs 
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+     
+    hdp2-hadoop::resourcemanager::create_local_dirs { $yarn_nm_local_dirs: 
+      service_state => $service_state
+    }
+
+    #TODO: cleanup 
+    Hdp2-Hadoop::Configfile<||>{yarn_rm_host => $hdp2::params::host_address}
+
+    class { 'hdp2-hadoop::resourcemanager::hdfs-directory' : 
+      service_state => $service_state 
+    }
+
+    #TODO: do we keep precondition here?
+    if ($service_state == 'running' and $hdp2-hadoop::params::use_preconditions == true) {
+      class { 'hdp2-hadoop::hdfs::service_check':
+        before => Hdp2-hadoop::Service['resourcemanager'],
+        require => Class['hdp2-hadoop']
+      }
+    }
+
+    hdp2-hadoop::service{ 'resourcemanager':
+      ensure       => $service_state,
+      user         => $yarn_user
+    }
+  
+    hdp2-hadoop::service{ 'historyserver':
+      ensure         => $service_state,
+      user           => $mapred_user,
+      create_pid_dir => true,
+      create_log_dir => true 
+    }
+
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['resourcemanager'] -> Hdp2-hadoop::Service['historyserver']
+    Class['hdp2-hadoop::resourcemanager::hdfs-directory'] -> Hdp2-hadoop::Service['resourcemanager']
+    Hdp2-hadoop::Resourcemanager::Create_local_dirs<||> -> Hdp2-hadoop::Service['resourcemanager']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::resourcemanager::create_local_dirs($service_state)
+{
+    $dirs = hdp_array_from_comma_list($name)
+    hdp2::directory_recursive_create { $dirs :
+      owner => $hdp2-hadoop::params::yarn_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+}
+
+class hdp2-hadoop::resourcemanager::hdfs-directory($service_state)
+{
+  hdp2-hadoop::hdfs::directory{ '/app-logs' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::yarn_user,
+    mode          => 1777
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 755
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred/history' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 755
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred/history/done_intermediate' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 1777
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred/history/done' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 770
+  }  
+  Hdp2-hadoop::Hdfs::Directory['/app-logs'] -> Hdp2-hadoop::Hdfs::Directory['/mapred'] -> Hdp2-hadoop::Hdfs::Directory['/mapred/history'] -> Hdp2-hadoop::Hdfs::Directory['/mapred/history/done'] -> Hdp2-hadoop::Hdfs::Directory['/mapred/history/done_intermediate']
+}
+
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp
new file mode 100644
index 0000000..3b6536d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp
@@ -0,0 +1,8 @@
+class hdp2-hadoop::resourcemanager::service_check()
+{
+  hdp2-hadoop::exec-hadoop { 'resourcemanager::service_check':
+    command   => 'job -list',
+    tries     => 3,
+    try_sleep => 5
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp
new file mode 100644
index 0000000..afeed33
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp
@@ -0,0 +1,107 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2-hadoop::service(
+  $ensure = 'running',
+  $user,
+  $initial_wait = undef,
+  $create_pid_dir = true,
+  $create_log_dir = true
+)
+{
+
+  #NOTE does not work if namenode and datanode are on same host 
+  $pid_dir = "${hdp2-hadoop::params::hadoop_piddirprefix}/${user}"
+  $log_dir = "${hdp2-hadoop::params::hadoop_logdirprefix}/${user}"
+  if (($name == 'resourcemanager') or ($name == 'nodemanager')) {
+    $hadoop_daemon = "${hdp2::params::yarn_sbin}/yarn-daemon.sh"
+    $cmd = "HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec ${hadoop_daemon} --config ${hdp2-hadoop::params::conf_dir}"
+    $pid_file = "${pid_dir}/yarn-${user}-${name}.pid"
+  } elsif ($name == 'historyserver') {
+    $hadoop_daemon = "${hdp2::params::mapred_sbin}/mr-jobhistory-daemon.sh"
+    # Temporary fix to not pass --config till Hadoop fixes it upstream.
+    $cmd = "HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec HADOOP_CONF_DIR=${hdp2-hadoop::params::conf_dir} ${hadoop_daemon}"
+    $pid_file = "${pid_dir}/mapred-${user}-${name}.pid"
+  } else {
+    $hadoop_daemon = "${hdp2::params::hadoop_sbin}/hadoop-daemon.sh"
+    $cmd = "${hadoop_daemon} --config ${hdp2-hadoop::params::conf_dir}"
+    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
+  }
+   
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
+    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}'"
+    $service_is_up = undef
+  } else {
+    $daemon_cmd = undef
+  }
+ 
+  if ($create_pid_dir == true) {
+    hdp2::directory_recursive_create { $pid_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  
+  if ($create_log_dir == true) {
+    hdp2::directory_recursive_create { $log_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  if ($daemon_cmd != undef) {  
+    hdp2::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $service_is_up,
+      initial_wait => $initial_wait
+    }
+  }
+
+  anchor{"hdp2-hadoop::service::${name}::begin":}
+  anchor{"hdp2-hadoop::service::${name}::end":}
+  if ($daemon_cmd != undef) {
+    Anchor["hdp2-hadoop::service::${name}::begin"] -> Hdp2::Exec[$daemon_cmd] -> Anchor["hdp2-hadoop::service::${name}::end"]
+
+    if ($create_pid_dir == true) {
+      Anchor["hdp2-hadoop::service::${name}::begin"] -> Hdp2::Directory_recursive_create[$pid_dir] -> Hdp2::Exec[$daemon_cmd] 
+    }
+     if ($create_log_dir == true) {
+      Anchor["hdp2-hadoop::service::${name}::begin"] -> Hdp2::Directory_recursive_create[$log_dir] -> Hdp2::Exec[$daemon_cmd] 
+    }
+  }
+  if ($ensure == 'running') {
+    #TODO: look at Puppet resource retry and retry_sleep
+    #TODO: can make sleep contingent on $name
+    $sleep = 5
+    $post_check = "sleep ${sleep}; ${service_is_up}"
+    hdp2::exec { $post_check:
+      command => $post_check,
+      unless  => $service_is_up
+    }
+    Hdp2::Exec[$daemon_cmd] -> Hdp2::Exec[$post_check] -> Anchor["hdp2-hadoop::service::${name}::end"]
+  }  
+}
+
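Editor's sketch (not part of the patch): as a concrete illustration of the command the define above assembles, here is a hypothetical declaration for the ResourceManager, with the roughly resulting strings shown as comments. The 'yarn' user and the default pid/log prefixes from params.pp are assumptions.

# Hypothetical declaration, values assumed for illustration only.
hdp2-hadoop::service { 'resourcemanager':
  ensure => 'running',
  user   => 'yarn'
}
# With the defaults above this would run, roughly:
#   su - yarn -c 'HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec \
#     <yarn_sbin>/yarn-daemon.sh --config <conf_dir> start resourcemanager'
# guarded by a check on /var/run/hadoop/yarn/yarn-yarn-resourcemanager.pid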
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp
new file mode 100644
index 0000000..123dfbb
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::slave::master-conn($master_host)
+{
+  Hdp2-Hadoop::Configfile<||>{
+    namenode_host => $master_host,
+    yarn_rm_host   => $master_host
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp
new file mode 100644
index 0000000..6fa297d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: this might be replaced by just using hdp2::namenode-conn
+class hdp2-hadoop::slave::namenode-conn($namenode_host)
+{
+  #TODO: check if can get rido of both
+  Hdp2-Hadoop::Configfile<||>{namenode_host => $namenode_host}
+  Hdp2::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp
new file mode 100644
index 0000000..1967de3
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp
@@ -0,0 +1,4 @@
+class hdp2-hadoop::slave::resourcemanager-conn($resourcemanager_host)
+{
+  Hdp2-Hadoop::Configfile<||>{yarn_rm_host => $resourcemanager_host}
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp
new file mode 100644
index 0000000..85680fc
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp
@@ -0,0 +1,48 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::smoketest(
+  $opts={}
+)
+{
+  $mapred_examples_jar = $hdp2::params::mapred_examples_jar
+
+  #TODO: put in wait
+  #TODO: look for better way to compute outname
+  $date_format = '"%M%d%y"'
+  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
+
+  #TODO: hardwired to run on namenode and to use user hdfs
+
+  $put = "dfs -put /etc/passwd passwd-${outname}"
+  $exec = "jar ${mapred_examples_jar} wordcount passwd-${outname} ${outname}.out"
+  $result = "fs -test -e ${outname}.out >/dev/null 2>&1"
+  anchor{ "hdp2-hadoop::smoketest::begin" :} ->
+  hdp2-hadoop::exec-hadoop{ $put:
+    command => $put
+  } ->
+  hdp2-hadoop::exec-hadoop{ $exec:
+    command =>  $exec
+  } ->
+  hdp2-hadoop::exec-hadoop{ $result:
+    command =>  $result
+  } ->
+  anchor{ "hdp2-hadoop::smoketest::end" :}
+}
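
Editor's sketch (not part of the patch): the chain above amounts to a word-count round trip through HDFS (put /etc/passwd, run the examples jar, test for the output). A hypothetical invocation from a node manifest follows; the node name is an assumption.

# Hypothetical invocation: run the MapReduce smoke test on a host,
# typically the NameNode per the TODO above. 'namenode01' is assumed.
node 'namenode01' {
  include hdp2-hadoop::smoketest
}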
diff --git a/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp b/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp
new file mode 100644
index 0000000..1bb3244
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp
@@ -0,0 +1,77 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::snamenode(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params  
+{
+  $hdp2::params::service_exists['hdp2-hadoop::snamenode'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+  Hdp2-hadoop::Package<||>{include_64_bit => true}
+  Hdp2-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $fs_checkpoint_dir = $hdp2-hadoop::params::fs_checkpoint_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+ 
+    Hdp2-Hadoop::Configfile<||>{snamenode_host => $hdp2::params::host_address}
+  
+    hdp2-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp2::params::service_exists['hdp2-hadoop::namenode'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp2-hadoop::service{ 'secondarynamenode':
+      ensure         => $service_state,
+      user           => $hdp2-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['secondarynamenode']
+    Hdp2-hadoop::Snamenode::Create_name_dirs<||> -> Hdp2-hadoop::Service['secondarynamenode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::snamenode::create_name_dirs($service_state)
+{
+   $dirs = hdp_array_from_comma_list($name)
+   hdp2::directory_recursive_create { $dirs :
+     owner => $hdp2-hadoop::params::hdfs_user,
+     mode => '0755',
+     service_state => $service_state,
+     force => true
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb b/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb
new file mode 100644
index 0000000..cc64305
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb
@@ -0,0 +1,112 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run 
+      application masters i.e. controls number of concurrent running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues capacity should add up to their parent queue's capacity
+      or less.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue i.e. change sub-queue 
+      allocations.
+    </description>
+  </property>
+
+</configuration>
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb b/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb
new file mode 100644
index 0000000..77e458f
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb b/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb
new file mode 100644
index 0000000..4bbd5fd
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb
@@ -0,0 +1,4 @@
+yarn.nodemanager.local-dirs=<%=scope.function_hdp_template_var("yarn_nm_local_dirs")%>
+yarn.nodemanager.linux-container-executor.group=hadoop
+yarn.nodemanager.log-dirs=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/<%=scope.function_hdp_template_var("yarn_user")%>
+banned.users=hdfs,bin,0
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb b/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb
new file mode 100644
index 0000000..a854394
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb
@@ -0,0 +1,247 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value><%=scope.function_hdp_template_var("compression_codecs")%></value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+  <property>
+    <name>io.compression.codec.lzo.class</name>
+    <value>com.hadoop.compression.lzo.LzoCodec</value>
+    <description>The implementation for lzo codec.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.dir</name>
+    <value><%=scope.function_hdp_template_var("fs_checkpoint_dir")%></value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.edits.dir</name>
+    <value>${fs.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        Default value is same as fs.checkpoint.dir
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>536870912</value>
+    <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>webinterface.private.actions</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value><%=scope.function_hdp_template_var("security_type")%></value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value><%=scope.function_hdp_template_var("enable_security_authorization")%></value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("yarn_user")%>/
+        RULE:[2:$1@$0]([nd]n@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hdfs_user")%>/
+        RULE:[2:$1@$0](hm@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        RULE:[2:$1@$0](rs@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        DEFAULT</value>
+<description>The mapping from kerberos principal names to local OS user names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+RULE:[2:$1@$0](.@ACME.ORG)s/@.//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
+DEFAULT
+    </description>
+  </property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hcat_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hcat_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("gateway_host")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("oozie_server")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+    Proxy group for templeton.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("templeton_server_host")%></value>
+  <description>
+    Proxy host for templeton.
+  </description>
+</property>
+</configuration>
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb
new file mode 100644
index 0000000..260578d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb
@@ -0,0 +1,95 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+
+#TODO: we shouldn't need to define HADOOP_HOME and HADOOP_LIBEXEC_DIR!
+export HADOOP_HOME_WARN_SUPPRESS=1
+export HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp2::params::hadoop_home")%>}
+export HADOOP_LIBEXEC_DIR=${HADOOP_HOME}/lib/hadoop/libexec
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+
+HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData ${HADOOP_JAVA_PLATFORM_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+# TODO: temporary fix for mapred_log_dir. Scripts should be fixed
+export HADOOP_MAPRED_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+# TODO: temporary fix for mapred_pid_dir. Scripts should be fixed
+export HADOOP_MAPRED_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
new file mode 100644
index 0000000..58695cc
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties.erb b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties.erb
new file mode 100644
index 0000000..3185d1b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties.erb
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-policy.xml.erb b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-policy.xml.erb
new file mode 100644
index 0000000..7c1dc63
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-policy.xml.erb
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.submission.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.task.umbilical.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value> <%=scope.function_hdp_template_var("hdfs_user")%></value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>
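For context, a hedged sketch of how an operator might push a change to the rendered policy file onto running daemons, as suggested by the security.refresh.policy.protocol.acl description above. The conf path and the hdfs user name are assumptions about the installed layout, not taken from this patch:

    # Ask the NameNode to re-read the rendered hadoop-policy.xml after a change.
    su - hdfs -c 'hadoop --config /etc/hadoop/conf dfsadmin -refreshServiceAcl'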
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/hdfs-site.xml.erb b/hmc/puppet/modules/hdp2-hadoop/templates/hdfs-site.xml.erb
new file mode 100644
index 0000000..58b54da
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/hdfs-site.xml.erb
@@ -0,0 +1,400 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value><%=scope.function_hdp_template_var("dfs_name_dir")%></value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value><%=scope.function_hdp_template_var("dfs_support_append")%></value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value><%=scope.function_hdp_template_var("dfs_webhdfs_enabled")%></value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value><%=scope.function_hdp_template_var("dfs_datanode_failed_volume_tolerated")%></value>
+    <description>Number of failed disks a DataNode would tolerate.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value><%=scope.function_hdp_template_var("dfs_block_local_path_access_user")%></value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value><%=scope.function_hdp_template_var("dfs_data_dir")%></value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.checksum.type</name>
+    <value>CRC32</value>
+    <description>The checksum method to be used by default. To maintain
+    compatibility, it is being set to CRC32. Once all migration steps
+    are complete, we can change it to CRC32C and take advantage of the
+    additional performance benefit.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value><%=scope.function_hdp_template_var("dfs_replication")%></value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:<%=scope.function_hdp_template_var("dfs_datanode_address")%></value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:<%=scope.function_hdp_template_var("dfs_datanode_http_address")%></value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value><%=scope.function_hdp_host("namenode_host")%>:50070</value>
+    <description>The address and the base port on which the dfs NameNode
+    web UI listens.</description>
+    <final>true</final>
+  </property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value><%=scope.function_hdp_template_var("datanode_du_reserved")%></value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>022</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow the queue size so that more client connections are allowed.</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value><%=scope.function_hdp_host("snamenode_host")%>:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value><%=scope.function_hdp_host("namenode_host")%>:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value><%=scope.function_hdp_template_var("dfs_datanode_data_dir_perm")%></value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for an HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL controlling who can view the default servlets in HDFS.</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+  <property>
+    <name>fs.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.</description>
+  </property>
+
+</configuration>
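Because every value above is filled in by hdp_template_var/hdp_host lookups at catalog compile time, the rendered copy is worth a quick sanity check before restarting HDFS. A minimal sketch, assuming xmllint is installed and the rendered file lands at /etc/hadoop/conf/hdfs-site.xml (both assumptions):

    RENDERED=/etc/hadoop/conf/hdfs-site.xml   # assumed install location
    xmllint --noout "$RENDERED" || echo "hdfs-site.xml is not well-formed XML"
    # Any leftover ERB markers mean a template variable did not resolve.
    grep -n '<%=' "$RENDERED" && echo "unrendered ERB tags found"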
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/health_check.erb b/hmc/puppet/modules/hdp2-hadoop/templates/health_check.erb
new file mode 100644
index 0000000..67dfb30
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/health_check.erb
@@ -0,0 +1,119 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:50060/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+# Disabled 'check_taskcontroller' for now...
+for check in disks jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
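The script's contract is an exit status plus an OK/ERROR prefix; the NodeManager health checker wired up later in yarn-site.xml.erb (yarn.nodemanager.health-checker.script.path) treats output starting with ERROR as an unhealthy node. A quick manual run, assuming the rendered script is installed as /etc/hadoop/conf/health_check (an assumption):

    # Run the rendered health check the same way the NodeManager would.
    out=$(/etc/hadoop/conf/health_check)
    rc=$?
    echo "exit=$rc output=$out"
    case "$out" in
      ERROR*) echo "node would be flagged unhealthy" ;;
      *)      echo "node looks healthy" ;;
    esac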
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/log4j.properties.erb b/hmc/puppet/modules/hdp2-hadoop/templates/log4j.properties.erb
new file mode 100644
index 0000000..0aea879
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/log4j.properties.erb
@@ -0,0 +1,171 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=1MB
+log4j.appender.RFA.MaxBackupIndex=30
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/mapred-site.xml.erb b/hmc/puppet/modules/hdp2-hadoop/templates/mapred-site.xml.erb
new file mode 100644
index 0000000..4303c6f
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/mapred-site.xml.erb
@@ -0,0 +1,266 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- MR AM properties -->
+
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+  </property> 
+
+  <property>
+   <name>yarn.app.mapreduce.am.staging-dir</name>
+   <value>/user</value>
+  </property>
+
+  <property>
+   <name>yarn.app.mapreduce.am.command-opts</name>
+   <value>-Xmx1024m</value>
+  </property>
+
+  <property>
+   <name>mapreduce.job.hdfs-servers</name>
+   <value>${fs.defaultFS}</value>
+  </property>
+       
+  <property>
+    <name>mapreduce.cluster.local.dir</name>
+    <value><%=scope.function_hdp_template_var("mapreduce_cluster_local_dir")%></value>
+    <description>No description</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.map.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some map tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>If true, then multiple instances of some reduce tasks
+               may be executed in parallel.</description>
+  </property>
+
+  <property>
+    <name>mapred.reduce.slowstart.completed.maps</name>
+    <value>0.05</value>
+  </property>
+
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to be compressed as SequenceFiles, how should
+                 they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.task.timeout</name>
+    <value>600000</value>
+    <description>The number of milliseconds before a task will be
+  terminated if it neither reads an input, writes an output, nor
+  updates its status string.
+  </description>
+  </property>
+
+  <property>
+    <name>jetty.connector</name>
+    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.child.root.logger</name>
+    <value>INFO,TLA</value>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+  </property>
+    
+  <property>
+    <name>mapreduce.job.acl-view-job</name>
+    <value>*</value>
+  </property>
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.sort.mb</name>
+    <value><%=scope.function_hdp_template_var("mapreduce_io_sort_mb")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.spill.percent</name>
+    <value><%=scope.function_hdp_template_var("io_sort_spill_percent")%></value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>io.sort.factor</name>
+    <value>100</value>
+    <description>No description</description>
+  </property>
+
+<!-- map tasks' properties -->
+
+  <property>
+    <name>mapred.map.child.java.opts</name>
+    <value><%=scope.function_hdp_template_var("mapred_map_child_java_opts_sz")%></value>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true</value>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.map.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_job_map_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.map.output.compression.codec</name>
+    <value><%=scope.function_hdp_template_var("mapred_map_output_compression_codec")%></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed?
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.compress.map.output</name>
+    <value><%=scope.function_hdp_template_var("mapred_compress_map_output")%></value>
+  </property>
+
+<!-- reduce tasks' properties -->
+
+  <property>
+    <name>mapred.reduce.child.java.opts</name>
+    <value><%=scope.function_hdp_template_var("mapred_reduce_child_java_opts_sz")%></value>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true</value>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapred.job.reduce.memory.mb</name>
+    <value><%=scope.function_hdp_template_var("mapred_job_red_mem_mb")%></value>
+  </property>
+
+  <property>
+    <name>mapred.reduce.parallel.copies</name>
+    <value>30</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.inmem.merge.threshold</name>
+    <value>1000</value>
+    <description>The threshold, in terms of the number of files,
+  for the in-memory merge process. When we accumulate the threshold number of
+  files we initiate the in-memory merge and spill to disk. A value of 0 or
+  less means no threshold is applied and the merge depends only on the
+  ramfs's memory consumption.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>The usage threshold at which an in-memory merge will be
+  initiated, expressed as a percentage of the total memory allocated to
+  storing in-memory map outputs, as defined by
+  mapred.job.shuffle.input.buffer.percent.
+  </description>
+  </property>
+
+  <property>
+    <name>mapred.job.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>The percentage of memory to be allocated from the maximum heap
+  size to storing map outputs during the shuffle.
+  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>The percentage of memory, relative to the maximum heap size, to
+  retain map outputs during the reduce. When the shuffle is concluded, any
+  remaining map outputs in memory must consume less than this threshold before
+  the reduce can begin.
+  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+    is 10 Gb.)  If the estimated input size of the reduce is greater than
+    this value, the job is failed. A value of -1 means that there is no limit
+    set. </description>
+  </property>
+
+  <!-- JobHistory Server -->
+
+  <property>
+   <name>mapreduce.jobhistory.intermediate-done-dir</name>
+   <value>/mapred/history/done_intermediate</value>
+  </property>
+
+  <property>
+   <name>mapreduce.jobhistory.done-dir</name>
+   <value>/mapred/history/done</value>
+  </property>
+    
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value><%=scope.function_hdp_host("yarn_rm_host")%>:10020</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value><%=scope.function_hdp_host("yarn_rm_host")%>:19888</value>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.create-intermediate-jh-base-dir</name>
+    <value>false</value>
+  </property>
+
+</configuration>
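The mapreduce.admin.user.env value above embeds a shell snippet that picks the 32- or 64-bit native library directory at task launch. Pulled out on its own (same logic, only reformatted for readability), it reads roughly as:

    # Decide which native-code directory to put on LD_LIBRARY_PATH.
    if "$JAVA_HOME/bin/java" -d32 -version &> /dev/null; then
      arch_dir=Linux-i386-32    # a 32-bit JVM is available
    else
      arch_dir=Linux-amd64-64   # otherwise assume 64-bit
    fi
    export LD_LIBRARY_PATH="$HADOOP_COMMON_HOME/lib/native/$arch_dir"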
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/slaves.erb b/hmc/puppet/modules/hdp2-hadoop/templates/slaves.erb
new file mode 100644
index 0000000..3cd38b3
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/slaves.erb
@@ -0,0 +1,3 @@
+<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
+<%= host %>
+<%end-%>
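The template simply emits one slave hostname per line (or nothing if slave_hosts is not an array). A hedged sanity check of the rendered file, assuming it lands at /etc/hadoop/conf/slaves:

    SLAVES=/etc/hadoop/conf/slaves   # assumed rendered location
    echo "$(wc -l < "$SLAVES") slave entries"
    while read -r host; do
      getent hosts "$host" > /dev/null || echo "unresolvable slave: $host"
    done < "$SLAVES"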
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/yarn-env.sh.erb b/hmc/puppet/modules/hdp2-hadoop/templates/yarn-env.sh.erb
new file mode 100644
index 0000000..87cfb17
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/yarn-env.sh.erb
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Java
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-<%=scope.function_hdp_template_var("::hdp2::params::yarn_user")%>}
+
+# TODO: Remove this once Hadoop sets YARN_HOME by default correctly.
+export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-<%=scope.function_hdp_template_var("::hdp2::params::hadoop_yarn_home")%>}
+
+# Configuration directory for the YARN daemons
+export YARN_CONF_DIR=${YARN_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+  
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m 
+
+# check envvars which might override default args
+export YARN_HEAPSIZE="<%=scope.function_hdp_template_var("yarn_heapsize")%>"
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  #echo "run with heapsize $YARN_HEAPSIZE"
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE"
+  #echo $JAVA_HEAP_MAX
+fi
+
+# ResourceManager Java options
+export YARN_RESOURCEMANAGER_HEAPSIZE="<%=scope.function_hdp_template_var("resourcemanager_heapsize")%>"
+resourcemanager_opt_newsize="<%=scope.function_hdp_template_var("resourcemanager_opt_newsize")%>"
+if [ "$resourcemanager_opt_newsize" != "" ]; then
+  export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -XX:NewSize=""$resourcemanager_opt_newsize""M"
+fi
+resourcemanager_opt_maxnewsize="<%=scope.function_hdp_template_var("resourcemanager_opt_maxnewsize")%>"
+if [ "$resourcemanager_opt_maxnewsize" != "" ]; then
+  export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -XX:MaxNewSize=""$resourcemanager_opt_maxnewsize""M"
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+export YARN_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# The directory where pid files are stored. /tmp by default.
+export YARN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi  
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+
+
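The ResourceManager JVM flags accumulate into YARN_RESOURCEMANAGER_OPTS only when the corresponding template variables are non-empty. A short sketch of the same logic with hypothetical sizes standing in for the resourcemanager_opt_* template vars, useful for previewing what the rendered file will export:

    resourcemanager_opt_newsize="200"      # hypothetical value, in MB
    resourcemanager_opt_maxnewsize="200"   # hypothetical value, in MB
    YARN_RESOURCEMANAGER_OPTS=""
    [ -n "$resourcemanager_opt_newsize" ] && \
      YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -XX:NewSize=${resourcemanager_opt_newsize}M"
    [ -n "$resourcemanager_opt_maxnewsize" ] && \
      YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -XX:MaxNewSize=${resourcemanager_opt_maxnewsize}M"
    echo "$YARN_RESOURCEMANAGER_OPTS"
    # -> -XX:NewSize=200M -XX:MaxNewSize=200M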
diff --git a/hmc/puppet/modules/hdp2-hadoop/templates/yarn-site.xml.erb b/hmc/puppet/modules/hdp2-hadoop/templates/yarn-site.xml.erb
new file mode 100644
index 0000000..eb2b602
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hadoop/templates/yarn-site.xml.erb
@@ -0,0 +1,161 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value><%=scope.function_hdp_host("yarn_rm_host")%>:8025</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value><%=scope.function_hdp_host("yarn_rm_host")%>:8030</value>
+  </property>
+  
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value><%=scope.function_hdp_host("yarn_rm_host")%>:8050</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value><%=scope.function_hdp_host("yarn_rm_host")%>:8141</value>
+  </property>
+
+  <property>
+   <name>yarn.resourcemanager.scheduler.class</name>
+   <value><%=scope.function_hdp_template_var("scheduler_name")%></value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value><%=scope.function_hdp_template_var("yarn_scheduler_min_alloc_mb")%></value>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value><%=scope.function_hdp_template_var("yarn_scheduler_max_alloc_mb")%></value>
+  </property>
+
+<!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.webapp.address</name>
+    <value>0.0.0.0:50060</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value><%=scope.function_hdp_template_var("yarn_nm_local_dirs")%></value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value><%=scope.function_hdp_template_var("yarn_nm_resource_memory_mb")%></value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+  <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio of virtual memory to physical memory used when
+    setting memory limits for containers. Container allocations are
+    expressed in terms of physical memory, and virtual memory usage
+    is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+  
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value><%=scope.function_hdp_template_var("container_executor")%></value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+ 
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce.shuffle</value>
+    <description>Auxiliary services of the NodeManager</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value><%=scope.function_hdp_template_var("yarn_nm_log_dirs")%></value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>The interval, in milliseconds, for which the node manager
+    waits between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value><%=scope.function_hdp_template_var("conf_dir")%>/health_check</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value> 
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>36000</value>
+  </property>
+
+
+</configuration>
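Two of the NodeManager settings above point back at pieces this patch also manages: the health script at <conf_dir>/health_check and the mapreduce.shuffle aux service. A quick hedged check that the wiring is in place on a node, assuming the conf directory is /etc/hadoop/conf:

    CONF=/etc/hadoop/conf   # assumed conf_dir value
    test -x "$CONF/health_check" || echo "health_check missing or not executable"
    grep -A1 'yarn.nodemanager.aux-services<' "$CONF/yarn-site.xml" | grep -q mapreduce.shuffle \
      || echo "mapreduce.shuffle aux-service not configured"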
diff --git a/hmc/puppet/modules/hdp2-hbase/files/hbaseSmoke.sh b/hmc/puppet/modules/hdp2-hbase/files/hbaseSmoke.sh
new file mode 100644
index 0000000..36ff10e
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/files/hbaseSmoke.sh
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'usertable'
+drop 'usertable'
+create 'usertable','family'
+put 'usertable','row01','family:col01','value1'
+scan 'usertable'
+exit
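The service check manifest further down in this patch drives this script through the HBase shell and then confirms the table's data directory exists in HDFS. Flattened into plain shell (the smoke user and conf path below are placeholders for $hdp2::params::smokeuser and the hbase conf dir, so treat them as assumptions):

    SMOKE_USER=ambari_qa   # stands in for $hdp2::params::smokeuser
    su - "$SMOKE_USER" -c 'hbase --config /etc/hbase/conf shell /tmp/hbaseSmoke.sh'
    su - "$SMOKE_USER" -c 'hadoop fs -test -e /apps/hbase/data/usertable' \
      && echo "hbase smoke test passed"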
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/client.pp b/hmc/puppet/modules/hdp2-hbase/manifests/client.pp
new file mode 100644
index 0000000..4e0db01
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/client.pp
@@ -0,0 +1,39 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::client(
+  $service_state = $hdp2::params::cluster_client_state,
+  $opts = {}
+)
+{
+  #assumption is there are no other hbase components on node
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    if (($hdp2::params::service_exists['hdp2-hbase::master'] != true) and ($hdp2::params::service_exists['hdp2-hbase::regionserver'] != true)) {
+      #adds package, users, directories, and common configs
+      class { 'hdp2-hbase': 
+        type          => 'client',
+        service_state => $service_state
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/hbase/service_check.pp b/hmc/puppet/modules/hdp2-hbase/manifests/hbase/service_check.pp
new file mode 100644
index 0000000..c7c2301
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/hbase/service_check.pp
@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::hbase::service_check() 
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+
+  $output_file = "/apps/hbase/data/usertable"
+  $conf_dir = $hdp2::params::hbase_conf_dir
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp2-hbase::hbase::service_check::begin':}
+
+  file { '/tmp/hbaseSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-hbase/hbaseSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hbaseSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell /tmp/hbaseSmoke.sh'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hbaseSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp2-hadoop::Exec-hadoop['hbase::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hbase::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hbaseSmoke.sh'],
+    before      => Anchor['hdp2-hbase::hbase::service_check::end'] #TODO: remove after testing
+  }
+  
+  anchor{ 'hdp2-hbase::hbase::service_check::end':}
+}
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/init.pp b/hmc/puppet/modules/hdp2-hbase/manifests/init.pp
new file mode 100644
index 0000000..243065b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/init.pp
@@ -0,0 +1,89 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase(
+  $type,
+  $service_state) 
+{
+  include hdp2-hbase::params
+ 
+  $hbase_user = $hdp2-hbase::params::hbase_user
+  $config_dir = $hdp2-hbase::params::conf_dir
+  
+  $hdp2::params::component_exists['hdp2-hbase'] = true
+
+  anchor{'hdp2-hbase::begin':}
+  anchor{'hdp2-hbase::end':}
+
+  if ($service_state == 'uninstalled') {
+    hdp2::package { 'hbase':
+      ensure => 'uninstalled'
+    }
+    hdp2::directory { $config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp2-hbase::begin'] -> Hdp2::Package['hbase'] -> Hdp2::Directory[$config_dir] -> Anchor['hdp2-hbase::end']
+
+  } else {  
+    hdp2::package { 'hbase': }
+  
+    hdp2::user{ $hbase_user:}
+ 
+    hdp2::directory { $config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+   hdp2-hbase::configfile { ['hbase-env.sh','hbase-site.xml','hbase-policy.xml','log4j.properties','hadoop-metrics.properties']: 
+      type => $type
+    }
+    hdp2-hbase::configfile { 'regionservers':}
+    Anchor['hdp2-hbase::begin'] -> Hdp2::Package['hbase'] -> Hdp2::User[$hbase_user] -> Hdp2::Directory[$config_dir] -> 
+    Hdp2-hbase::Configfile<||> ->  Anchor['hdp2-hbase::end']
+  }
+}
+
+### config files
+define hdp2-hbase::configfile(
+  $mode = undef,
+  $hbase_master_host = undef,
+  $template_tag = undef,
+  $type = undef,
+) 
+{
+  if ($name == 'hadoop-metrics.properties') {
+    if ($type == 'master') {
+      $tag = 'GANGLIA-MASTER'
+    } else {
+      $tag = 'GANGLIA-RS'
+    }
+  } else {
+    $tag = $template_tag
+  }
+  hdp2::configfile { "${hdp2-hbase::params::conf_dir}/${name}":
+    component         => 'hbase',
+    owner             => $hdp2-hbase::params::hbase_user,
+    mode              => $mode,
+    hbase_master_host => $hbase_master_host,
+    template_tag      => $tag
+  }
+}
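A quick way to exercise this class outside a full HMC run is a syntax check plus a no-op apply; the puppet CLI invocation and the module path below are assumptions about the local checkout, not part of this patch:

    # Syntax-check the manifest, then dry-run the client variant of the class.
    puppet parser validate hmc/puppet/modules/hdp2-hbase/manifests/init.pp
    puppet apply --noop --modulepath hmc/puppet/modules \
      -e "class { 'hdp2-hbase': type => 'client', service_state => 'installed_and_configured' }"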
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/master-conn.pp b/hmc/puppet/modules/hdp2-hbase/manifests/master-conn.pp
new file mode 100644
index 0000000..4391322
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/master-conn.pp
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::master-conn($hbase_master_host)
+{
+  Hdp2-Hbase::Configfile<||>{hbase_master_host => $hbase_master_host}
+}
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/master.pp b/hmc/puppet/modules/hdp2-hbase/manifests/master.pp
new file mode 100644
index 0000000..5cbdffd
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/master.pp
@@ -0,0 +1,62 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::master(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hbase::params 
+{
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
+    $hdp2::params::service_exists['hdp2-hbase::master'] = true
+
+    $hdfs_root_dir = $hdp2-hbase::params::hbase_hdfs_root_dir
+  
+    #adds package, users, directories, and common configs
+    class { 'hdp2-hbase': 
+      type          => 'master',
+      service_state => $service_state
+    }
+
+    Hdp2-hbase::Configfile<||>{hbase_master_host => $hdp2::params::host_address}
+  
+    hdp2-hadoop::hdfs::directory { $hdfs_root_dir:
+      owner         => $hdp2-hbase::params::hbase_user,
+      service_state => $service_state
+    }    
+
+    hdp2-hbase::service{ 'master':
+      ensure => $service_state
+    }
+
+    #top level does not need anchors
+    Class['hdp2-hbase'] -> Hdp2-hadoop::Hdfs::Directory[$hdfs_root_dir] -> Hdp2-hbase::Service['master'] 
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+#assumes that master and regionserver will not be on same machine
+class hdp2-hbase::master::enable-ganglia()
+{
+  Hdp2-hbase::Configfile<|title  == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-MASTER'}
+}
+
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/params.pp b/hmc/puppet/modules/hdp2-hbase/manifests/params.pp
new file mode 100644
index 0000000..881a214
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/params.pp
@@ -0,0 +1,86 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::params() inherits hdp2::params 
+{
+  
+  ####### users
+  $hbase_user = $hdp2::params::hbase_user
+  
+  ### hbase-env
+  $hadoop_conf_dir = hdp_default("hadoop/hbase-env/hadoop_conf_dir")
+  $conf_dir = $hdp2::params::hbase_conf_dir
+
+  $hbase_log_dir = hdp_default("hadoop/hbase-env/hbase_log_dir","/var/log/hbase")
+
+  $hbase_master_heapsize = hdp_default("hadoop/hbase-env/hbase_master_heapsize","1000m")
+
+  $hbase_pid_dir = hdp_default("hadoop/hbase-env/hbase_pid_dir","/var/run/hbase")
+
+  $hbase_regionserver_heapsize = hdp_default("hadoop/hbase-env/hbase_regionserver_heapsize","1000m")
+
+  $hbase_regionserver_xmn_size = hdp_calc_xmn_from_xms("$hbase_regionserver_heapsize","0.2","512")
+
+  ### hbase-site.xml
+  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
+
+  $hbase_tmp_dir = hdp_default("hadoop/hbase-site/hbase_tmp_dir","$hbase_log_dir")
+
+
+  #TODO: check if any of these 'hdfs' vars need to be equated with vars in hdp2-hadoop
+  $hdfs_enable_shortcircuit_read = hdp_default("hadoop/hbase-site/hdfs_enable_shortcircuit_read",false)
+
+  $hdfs_enable_shortcircuit_skipchecksum = hdp_default("hadoop/hbase-site/hdfs_enable_shortcircuit_skipchecksum","false")
+
+  $hdfs_support_append = hdp_default("hadoop/hbase-site/hdfs_support_append",true)
+
+  $hfile_blockcache_size = hdp_default("hadoop/hbase-site/hfile_blockcache_size","0.25")
+
+  $hfile_max_keyvalue_size = hdp_default("hadoop/hbase-site/hfile_max_keyvalue_size",10485760)
+
+  $zookeeper_sessiontimeout = hdp_default("hadoop/hbase-site/zookeeper_sessiontimeout",60000)
+
+  $client_scannercaching = hdp_default("hadoop/hbase-site/client_scannercaching",100)
+
+  $hstore_blockingstorefiles = hdp_default("hadoop/hbase-site/hstore_blockingstorefiles",7)
+
+  $hstore_compactionthreshold = hdp_default("hadoop/hbase-site/hstore_compactionthreshold",3)
+
+  $hstorefile_maxsize = hdp_default("hadoop/hbase-site/hstorefile_maxsize",1073741824)
+
+  $hregion_blockmultiplier = hdp_default("hadoop/hbase-site/hregion_blockmultiplier",2)
+
+  $hregion_memstoreflushsize = hdp_default("hadoop/hbase-site/hregion_memstoreflushsize",134217728)
+
+  $regionserver_handlers = hdp_default("hadoop/hbase-site/regionserver_handlers", 30)
+
+  $hregion_majorcompaction = hdp_default("hadoop/hbase-site/hregion_majorcompaction", 86400000)
+
+  $preloaded_mastercoprocessor_classes = hdp_default("hadoop/hbase-site/preloaded_mastercoprocessor_classes")
+
+  $preloaded_regioncoprocessor_classes = hdp_default("hadoop/hbase-site/preloaded_regioncoprocessor_classes")
+
+  $regionserver_memstore_lab = hdp_default("hadoop/hbase-site/regionserver_memstore_lab",true)
+
+  $regionserver_memstore_lowerlimit = hdp_default("hadoop/hbase-site/regionserver_memstore_lowerlimit","0.35")
+
+  $regionserver_memstore_upperlimit = hdp_default("hadoop/hbase-site/regionserver_memstore_upperlimit","0.4")
+
+}
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/regionserver.pp b/hmc/puppet/modules/hdp2-hbase/manifests/regionserver.pp
new file mode 100644
index 0000000..d497baa
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/regionserver.pp
@@ -0,0 +1,62 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::regionserver(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hbase::params
+{
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {    
+    $hdp2::params::service_exists['hdp2-hbase::regionserver'] = true       
+
+    if ($hdp2::params::service_exists['hdp2-hbase::master'] != true) {
+      #adds package, users, directories, and common configs
+      class { 'hdp2-hbase': 
+        type          => 'regionserver',
+        service_state => $service_state
+      } 
+      $create_pid_dir = true
+      $create_log_dir = true
+    } else {
+      $create_pid_dir = false
+      $create_log_dir = false
+    }
+
+
+    hdp2-hbase::service{ 'regionserver':
+      ensure         => $service_state,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+
+    #top level does not need anchors
+    Class['hdp2-hbase'] ->  Hdp2-hbase::Service['regionserver']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+#assumes that master and regionserver will not be on the same machine
+class hdp2-hbase::regionserver::enable-ganglia()
+{
+  Hdp2-hbase::Configfile<|title  == 'hadoop-metrics.properties'|>{template_tag => 'GANGLIA-RS'}
+}
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/service.pp b/hmc/puppet/modules/hdp2-hbase/manifests/service.pp
new file mode 100644
index 0000000..af79bec
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/service.pp
@@ -0,0 +1,76 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2-hbase::service(
+  $ensure = 'running',
+  $create_pid_dir = true,
+  $create_log_dir = true,
+  $initial_wait = undef)
+{
+  include hdp2-hbase::params
+
+  $role = $name
+  $user = $hdp2-hbase::params::hbase_user
+
+  $conf_dir = $hdp2::params::hbase_conf_dir
+  $hbase_daemon = $hdp2::params::hbase_daemon_script
+  $cmd = "$hbase_daemon --config ${conf_dir}"
+  $pid_dir = $hdp2-hbase::params::hbase_pid_dir
+  $pid_file = "${pid_dir}/hbase-hbase-${role}.pid"
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} start ${role}'"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${role}'"
+    $no_op_test = undef
+  } else {
+    $daemon_cmd = undef
+  }
+
+  $tag = "hbase_service-${name}"
+  
+  if ($create_pid_dir == true) {
+    hdp2::directory_recursive_create { $pid_dir: 
+      owner => $user,
+      tag   => $tag,
+      service_state => $ensure,
+      force => true
+    }
+  }
+  if ($create_log_dir == true) {
+    hdp2::directory_recursive_create { $hdp2-hbase::params::hbase_log_dir: 
+      owner => $user,
+      tag   => $tag,
+      service_state => $ensure,
+      force => true
+    }
+  }
+
+  anchor{"hdp2-hbase::service::${name}::begin":} -> Hdp2::Directory_recursive_create<|tag == $tag|> -> anchor{"hdp2-hbase::service::${name}::end":}
+  if ($daemon_cmd != undef) { 
+    hdp2::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $no_op_test,
+      initial_wait => $initial_wait
+    }
+    Hdp2::Directory_recursive_create<|context_tag == 'hbase_service'|> -> Hdp2::Exec[$daemon_cmd] -> Anchor["hdp2-hbase::service::${name}::end"]
+  }
+}
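
The hdp2-hbase::service define above builds a start/stop command for the HBase
daemon script, runs it as the hbase user, and guards the start with a pid-file
check so the exec is a no-op when the daemon is already up. A minimal sketch of
declaring it directly (normally the master and regionserver classes do this);
the initial_wait value is an assumption:

    hdp2-hbase::service { 'regionserver':
      ensure         => 'running',
      create_pid_dir => true,
      create_log_dir => true,
      initial_wait   => 10,
    }
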
diff --git a/hmc/puppet/modules/hdp2-hbase/manifests/zk-conn.pp b/hmc/puppet/modules/hdp2-hbase/manifests/zk-conn.pp
new file mode 100644
index 0000000..f018fd9
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/manifests/zk-conn.pp
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hbase::zk-conn(
+ $zookeeper_hosts
+)
+{
+  Hdp2::Configfile<||>{zookeeper_hosts => $zookeeper_hosts}
+}
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb b/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
new file mode 100644
index 0000000..f3988d9
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties-GANGLIA-MASTER.erb
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb b/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
new file mode 100644
index 0000000..386376d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties-GANGLIA-RS.erb
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties.erb b/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties.erb
new file mode 100644
index 0000000..f3988d9
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/hadoop-metrics.properties.erb
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8663
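
The template above (no tag) is the default hadoop-metrics.properties; the
-GANGLIA-MASTER and -GANGLIA-RS variants are selected by the enable-ganglia
classes earlier in this patch through a collector override of template_tag,
for example:

    Hdp2-hbase::Configfile<|title == 'hadoop-metrics.properties'|> {
      template_tag => 'GANGLIA-MASTER',
    }
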
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/hbase-env.sh.erb b/hmc/puppet/modules/hdp2-hbase/templates/hbase-env.sh.erb
new file mode 100644
index 0000000..65ebf56
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/hbase-env.sh.erb
@@ -0,0 +1,76 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-<%=scope.function_hdp_template_var("hbase_conf_dir")%>}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}:<%=scope.function_hdp_template_var("::hdp2-hadoop::params::conf_dir")%>
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-ea -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
+
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx<%=scope.function_hdp_template_var("hbase_master_heapsize")%>"
+export HBASE_REGIONSERVER_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseParNewGC -Xmn64m -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:<%=scope.function_hdp_template_var("hbase_log_dir")%>/log/$USER/gc.log-`date +'%Y%m%d%H%M'` -Xms<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -Xmx<%=scope.function_hdp_template_var("hbase_regionserver_heapsize")%> -XX:ErrorFile=<%=scope.function_hdp_template_var("hbase_log_dir")%>/log/$USER/hs_err_pid%p.log"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR=<%=scope.function_hdp_template_var("hbase_log_dir")%>
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR=<%=scope.function_hdp_template_var("hbase_pid_dir")%>
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/hbase-policy.xml.erb b/hmc/puppet/modules/hdp2-hbase/templates/hbase-policy.xml.erb
new file mode 100644
index 0000000..e45f23c
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/hbase-policy.xml.erb
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/hbase-site.xml.erb b/hmc/puppet/modules/hdp2-hbase/templates/hbase-site.xml.erb
new file mode 100644
index 0000000..c0b2a5b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/hbase-site.xml.erb
@@ -0,0 +1,308 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020<%=scope.function_hdp_template_var("hbase_hdfs_root_dir")%></value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value><%=scope.function_hdp_template_var("hbase_tmp_dir")%></value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value><%=scope.function_hdp_host("hbase_master_host")%></value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value><%=scope.function_hdp_template_var("regionserver_memstore_upperlimit")%></value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value><%=scope.function_hdp_template_var("regionserver_handlers")%></value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value><%=scope.function_hdp_template_var("hregion_majorcompaction")%></value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value><%=scope.function_hdp_template_var("regionserver_memstore_lowerlimit")%></value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value><%=scope.function_hdp_template_var("hregion_blockmultiplier")%></value>
+    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstore during spikes in update traffic.  Without an
+    upper-bound, memstore fills such that when it flushes the
+    resultant flush files take a long time to compact or split, or
+    worse, we OOME
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value><%=scope.function_hdp_template_var("hregion_memstoreflushsize")%></value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value><%=scope.function_hdp_template_var("regionserver_memstore_lab")%></value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value><%=scope.function_hdp_template_var("hstorefile_maxsize")%></value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value><%=scope.function_hdp_template_var("client_scannercaching")%></value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value><%=scope.function_hdp_template_var("zookeeper_sessiontimeout")%></value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value><%=scope.function_hdp_template_var("hfile_max_keyvalue_size")%></value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since they cannot be split it helps avoiding that a region
+    cannot be split any further because the data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value><%=scope.function_hdp_template_var("hstore_compactionthreshold")%></value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value><%=scope.function_hdp_template_var("hstore_blockingstorefiles")%></value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value><%=scope.function_hdp_template_var("hfile_blockcache_size")%></value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/hm.service.keytab</value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value>hm/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/rs.service.keytab</value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value>rs/_HOST@<%=scope.function_hdp_template_var("kerberos_domain")%></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value><%=scope.function_hdp_template_var("preloaded_regioncoprocessor_classes")%></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value><%=scope.function_hdp_template_var("preloaded_mastercoprocessor_classes")%></value>
+    <description>A comma-separated list of
+    org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+    loaded by default on the active HMaster process. For any implemented
+    coprocessor methods, the listed classes will be called in order. After
+    implementing your own MasterObserver, just put it in HBase's classpath
+    and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value><%=zkh=scope.function_hdp_host("zookeeper_hosts");scope.function_hdp_is_empty(zkh) ? "" : [zkh].flatten.join(",")%></value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>dfs.support.append</name>
+    <value><%=scope.function_hdp_template_var("hdfs_support_append")%></value>
+    <description>Does HDFS allow appends to files?
+    This is an HDFS config, set here so the HDFS client will do append support.
+    You must ensure that this config is also true server-side when running HBase
+    (You will have to restart your cluster after setting it).
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value><%=scope.function_hdp_template_var("hdfs_enable_shortcircuit_read")%></value>
+    <description>Enable/Disable short circuit read for your client.
+    Hadoop servers should be configured to allow short circuit read
+    for the hbase user for this to take effect
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.skip.checksum</name>
+    <value><%=scope.function_hdp_template_var("hdfs_enable_shortcircuit_skipchecksum")%></value>
+    <description>Enable/disable skipping the checksum check</description>
+  </property>
+</configuration>
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/log4j.properties.erb b/hmc/puppet/modules/hdp2-hbase/templates/log4j.properties.erb
new file mode 100644
index 0000000..5227c9a
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/log4j.properties.erb
@@ -0,0 +1,80 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=INFO
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
diff --git a/hmc/puppet/modules/hdp2-hbase/templates/regionservers.erb b/hmc/puppet/modules/hdp2-hbase/templates/regionservers.erb
new file mode 100644
index 0000000..159a2f6
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hbase/templates/regionservers.erb
@@ -0,0 +1,3 @@
+<%h=scope.function_hdp_host("hbase_rs_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
+<%= host %>
+<%end-%>
diff --git a/hmc/puppet/modules/hdp2-hcat/files/hcatSmoke.sh b/hmc/puppet/modules/hdp2-hcat/files/hcatSmoke.sh
new file mode 100644
index 0000000..efc1fcb
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hcat/files/hcatSmoke.sh
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+hcat -e "show tables"
+hcat -e "drop table IF EXISTS ${tablename}"
+hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
diff --git a/hmc/puppet/modules/hdp2-hcat/files/pigSmoke.sh b/hmc/puppet/modules/hdp2-hcat/files/pigSmoke.sh
new file mode 100644
index 0000000..b6a286f
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hcat/files/pigSmoke.sh
@@ -0,0 +1,24 @@
+/*
+ *
+ * licensed to the apache software foundation (asf) under one
+ * or more contributor license agreements.  see the notice file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  the asf licenses this file
+ * to you under the apache license, version 2.0 (the
+ * "license"); you may not use this file except in compliance
+ * with the license.  you may obtain a copy of the license at
+ *
+ *   http://www.apache.org/licenses/license-2.0
+ *
+ * unless required by applicable law or agreed to in writing,
+ * software distributed under the license is distributed on an
+ * "as is" basis, without warranties or conditions of any
+ * kind, either express or implied.  see the license for the
+ * specific language governing permissions and limitations
+ * under the license.
+ *
+ */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';
diff --git a/hmc/puppet/modules/hdp2-hcat/manifests/hcat/service_check.pp b/hmc/puppet/modules/hdp2-hcat/manifests/hcat/service_check.pp
new file mode 100644
index 0000000..2d35800
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hcat/manifests/hcat/service_check.pp
@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hcat::hcat::service_check() 
+{
+  $unique = hdp_unique_id_and_date()
+  $smoke_test_user = $hdp2::params::smokeuser
+  $output_file = "/apps/hive/warehouse/hcatsmoke${unique}"
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp2-hcat::hcat::service_check::begin':}
+
+  file { '/tmp/hcatSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-hcat/hcatSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hcatSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'sh /tmp/hcatSmoke.sh hcatsmoke${unique}'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hcatSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp2-hadoop::Exec-hadoop['hcat::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hcat::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hcatSmoke.sh'],
+    before      => Anchor['hdp2-hcat::hcat::service_check::end'] 
+  }
+  
+  anchor{ 'hdp2-hcat::hcat::service_check::end':}
+}
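
A minimal sketch of wiring the HCatalog smoke test onto a node (the node name
is hypothetical; the class takes no parameters and runs the check as the
configured smoke user):

    node 'hcat-client.example.com' {
      include hdp2-hcat::hcat::service_check
    }
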
diff --git a/hmc/puppet/modules/hdp2-hcat/manifests/init.pp b/hmc/puppet/modules/hdp2-hcat/manifests/init.pp
new file mode 100644
index 0000000..628f1fb
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hcat/manifests/init.pp
@@ -0,0 +1,72 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hcat(
+  $service_state = $hdp2::params::cluster_client_state
+) inherits hdp2-hcat::params
+{
+  $hcat_config_dir = $hdp2-hcat::params::hcat_conf_dir
+   
+
+  if ($hdp2::params::use_32_bits_on_slaves == false) {
+    $size = 64
+  } else {
+    $size = 32
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {
+    hdp2::package { 'hcat' :
+      ensure => 'uninstalled', 
+      size   => $size 
+    }
+
+    hdp2::directory { $hcat_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Hdp2::Package['hcat'] -> Hdp2::Directory[$hcat_config_dir]
+
+  } elsif ($service_state == 'installed_and_configured') {
+    hdp2::package { 'hcat' : 
+      size => $size 
+    }
+
+    hdp2::directory { $hcat_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2-hcat::configfile { 'hcat-env.sh':}
+  
+    Hdp2::Package['hcat'] -> Hdp2::Directory[$hcat_config_dir] -> Hdp2-hcat::Configfile<||>
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+### config files
+define hdp2-hcat::configfile()
+{
+  hdp2::configfile { "${hdp2::params::hcat_conf_dir}/${name}":
+    component => 'hcat'
+  }
+}
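
hdp2-hcat::configfile is a thin wrapper around hdp2::configfile keyed on the
file name; init.pp above uses it only for hcat-env.sh. A hypothetical sketch of
templating an additional file under the hcat conf dir (the file name is an
assumption and would need a matching template in the hdp2 module):

    hdp2-hcat::configfile { 'hcat-site.xml': }
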
diff --git a/hmc/puppet/modules/hdp2-hcat/manifests/params.pp b/hmc/puppet/modules/hdp2-hcat/manifests/params.pp
new file mode 100644
index 0000000..1d6a1b0
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hcat/manifests/params.pp
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hcat::params() inherits hdp2::params
+{
+  $hcat_conf_dir = $hdp2::params::hcat_conf_dir
+
+  $hcat_metastore_port = hdp_default("hcat_metastore_port",9933)
+  $hcat_lib = hdp_default("hcat_lib","/usr/lib/hcatalog/share/hcatalog") #TODO: should I remove and just use hcat_dbroot
+
+  ### hcat-env
+  $hcat_dbroot = hdp_default("hadoop/hcat-env/hcat_dbroot",$hcat_lib)
+
+  $hcat_log_dir = hdp_default("hadoop/hcat-env/hcat_log_dir","/var/log/hcatalog")
+
+  $hcat_pid_dir = hdp_default("hadoop/hcat-env/hcat_pid_dir","/var/run/hcatalog")
+
+}
diff --git a/hmc/puppet/modules/hdp2-hcat/templates/hcat-env.sh.erb b/hmc/puppet/modules/hdp2-hcat/templates/hcat-env.sh.erb
new file mode 100644
index 0000000..09ee6cc
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hcat/templates/hcat-env.sh.erb
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME=<%=scope.function_hdp_java_home()%>
+HCAT_PID_DIR=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/
+HCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
+HCAT_CONF_DIR=<%=scope.function_hdp_template_var("hcat_conf_dir")%>
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp2::params::hadoop_home")%>}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT=<%=scope.function_hdp_template_var("hcat_dbroot")%>
+USER=<%=scope.function_hdp_user("hcat_user")%>
+METASTORE_PORT=<%=scope.function_hdp_template_var("hcat_metastore_port")%>
diff --git a/hmc/puppet/modules/hdp2-hive/files/hiveSmoke.sh b/hmc/puppet/modules/hdp2-hive/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/client.pp b/hmc/puppet/modules/hdp2-hive/manifests/client.pp
new file mode 100644
index 0000000..e6eebab
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/client.pp
@@ -0,0 +1,40 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive::client(
+  $service_state = $hdp2::params::cluster_client_state,
+  $hive_server_host = undef
+) inherits hdp2::params
+{ 
+  if ($service_state == 'no_op') {
+   } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    if ($hdp2::params::service_exists['hdp2-hive::server'] != true) {
+      #installs package, creates user, sets configuration
+      class { 'hdp2-hive':
+        service_state => $service_state
+      } 
+      if ($hive_server_host != undef) {
+        Hdp2-Hive::Configfile<||>{hive_server_host => $hive_server_host}
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/hive/service_check.pp b/hmc/puppet/modules/hdp2-hive/manifests/hive/service_check.pp
new file mode 100644
index 0000000..4f7e528
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/hive/service_check.pp
@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive::hive::service_check() 
+{
+  $unique = hdp_unique_id_and_date()
+  $smoke_test_user = $hdp2::params::smokeuser
+  $output_file = "/apps/hive/warehouse/hivesmoke${unique}"
+
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp2-hive::hive::service_check::begin':}
+
+  file { '/tmp/hiveSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-hive/hiveSmoke.sh",
+    mode => '0755',
+  }
+
+  exec { '/tmp/hiveSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'sh /tmp/hiveSmoke.sh hivesmoke${unique}'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/hiveSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp2-hadoop::Exec-hadoop['hive::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hive::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/hiveSmoke.sh'],
+    before      => Anchor['hdp2-hive::hive::service_check::end'] 
+  }
+  
+  anchor{ 'hdp2-hive::hive::service_check::end':}
+}
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/init.pp b/hmc/puppet/modules/hdp2-hive/manifests/init.pp
new file mode 100644
index 0000000..3be10ca
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/init.pp
@@ -0,0 +1,82 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive(
+  $service_state,
+  $server = false
+) 
+{
+  include hdp2-hive::params
+
+  $hive_user = $hdp2-hive::params::hive_user
+  $hive_config_dir = $hdp2-hive::params::hive_conf_dir
+
+  anchor { 'hdp2-hive::begin': }
+  anchor { 'hdp2-hive::end': } 
+
+  if ($service_state == 'uninstalled') {
+    hdp2::package { 'hive' : 
+      ensure => 'uninstalled'
+    }
+
+    hdp2::directory { $hive_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp2-hive::begin'] -> Hdp2::Package['hive'] -> Hdp2::Directory[$hive_config_dir] ->  Anchor['hdp2-hive::end']
+
+  } else {
+    hdp2::package { 'hive' : }
+    if ($server == true ) {
+      class { 'hdp2-hive::mysql-connector': }
+    }
+  
+    hdp2::user{ $hive_user:}
+  
+    hdp2::directory { $hive_config_dir: 
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2-hive::configfile { ['hive-env.sh','hive-site.xml']: }
+  
+    Anchor['hdp2-hive::begin'] -> Hdp2::Package['hive'] -> Hdp2::User[$hive_user] ->  
+     Hdp2::Directory[$hive_config_dir] -> Hdp2-hive::Configfile<||> ->  Anchor['hdp2-hive::end']
+
+    if ($server == true) {
+      Hdp2::Package['hive'] -> Hdp2::User[$hive_user] -> Class['hdp2-hive::mysql-connector'] -> Anchor['hdp2-hive::end']
+    }
+  }
+}
+
+### config files
+define hdp2-hive::configfile(
+  $mode = undef,
+  $hive_server_host = undef
+) 
+{
+  hdp2::configfile { "${hdp2-hive::params::hive_conf_dir}/${name}":
+    component        => 'hive',
+    owner            => $hdp2-hive::params::hive_user,
+    mode             => $mode,
+    hive_server_host => $hive_server_host 
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/mysql-connector.pp b/hmc/puppet/modules/hdp2-hive/manifests/mysql-connector.pp
new file mode 100644
index 0000000..c67e6ec
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/mysql-connector.pp
@@ -0,0 +1,45 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive::mysql-connector()
+{
+  include hdp2-hive::params
+
+  $hive_lib = $hdp2-hive::params::hive_lib
+  $target = "${hive_lib}/mysql-connector-java.jar"
+  
+  anchor { 'hdp2-hive::mysql-connector::begin':}
+
+   hdp2::package { 'mysql-connector' :
+     require   => Anchor['hdp2-hive::mysql-connector::begin']
+   }
+
+   hdp2::exec { 'hive mkdir -p ${artifact_dir} ;  cp /usr/share/java/mysql-connector-java.jar  ${target}':
+       command => "mkdir -p ${artifact_dir} ;  cp /usr/share/java/mysql-connector-java.jar  ${target}",
+       unless  => "test -f ${target}",
+       creates => $target,
+       path    => ["/bin","/usr/bin/"],
+       require => Hdp2::Package['mysql-connector'],
+       notify  =>  Anchor['hdp2-hive::mysql-connector::end'],
+   }
+
+   anchor { 'hdp2-hive::mysql-connector::end':}
+
+}
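The exec above stages the distribution-provided MySQL JDBC driver into Hive's lib directory; note that ${artifact_dir} is expected to come from the surrounding hdp2 parameters and is not defined in this class. A rough shell equivalent, with paths shown only as assumptions, is:

  # Sketch: approximate manual equivalent of the mysql-connector exec (paths are assumptions)
  ARTIFACT_DIR=/tmp/HDP-artifacts
  TARGET=/usr/lib/hive/lib/mysql-connector-java.jar
  if [ ! -f "$TARGET" ]; then
    mkdir -p "$ARTIFACT_DIR"
    cp /usr/share/java/mysql-connector-java.jar "$TARGET"
  fi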
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/params.pp b/hmc/puppet/modules/hdp2-hive/manifests/params.pp
new file mode 100644
index 0000000..e7f0711
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/params.pp
@@ -0,0 +1,58 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive::params() inherits hdp2::params
+{
+
+  #TODO: will move to globals
+  $hive_metastore_user_name = hdp_default("hadoop/hive-site/hive_metastore_user_name","dbusername")
+  $hive_metastore_user_passwd = hdp_default("hadoop/hive-site/hive_metastore_user_passwd","dbpassword")
+
+  ### users
+  $hive_user = $hdp2::params::hive_user 
+
+  ### common
+  $hive_metastore_port = hdp_default("hive_metastore_port",9083)
+  $hive_lib = hdp_default("hive_lib","/usr/lib/hive/lib/") #TODO: should I remove and just use hive_dbroot
+
+  ### hive-env
+  $hive_conf_dir = $hdp2::params::hive_conf_dir
+
+  $hive_dbroot = hdp_default("hadoop/hive-env/hive_dbroot",$hive_lib)
+
+  $hive_log_dir = hdp_default("hadoop/hive-env/hive_log_dir","/var/log/hive")
+
+  $hive_pid_dir = hdp_default("hadoop/hive-env/hive_pid_dir","/var/run/hive")
+  
+  ### hive-site
+  $hive_database_name = hdp_default("hadoop/hive-site/hive_database_name","hive")
+
+  $hive_metastore_principal = hdp_default("hadoop/hive-site/hive_metastore_principal")
+
+  $hive_metastore_sasl_enabled = hdp_default("hadoop/hive-site/hive_metastore_sasl_enabled","false")
+
+  #TODO: using instead hive_server_host in hdp2::params $hive_metastore_server_host = hdp_default("hadoop/hive-site/hive_metastore_server_host")
+
+  $keytab_path = hdp_default("hadoop/hive-site/keytab_path")
+  
+  ###mysql connector
+  $download_url = $hdp2::params::gpl_artifacts_download_url
+  $mysql_connector_url = "${download_url}/mysql-connector-java-5.1.18.zip"
+  $hive_aux_jars_path =  hdp_default("hive_aux_jars_path","/usr/lib/hcatalog/share/hcatalog/")
+}
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/server.pp b/hmc/puppet/modules/hdp2-hive/manifests/server.pp
new file mode 100644
index 0000000..43bd084
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/server.pp
@@ -0,0 +1,69 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive::server(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp2-hive::params
+{ 
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+
+    $hdp2::params::service_exists['hdp2-hive::server'] = true
+
+    #installs package, creates user, sets configuration
+    class{ 'hdp2-hive' : 
+      service_state => $service_state,
+      server        => true
+    } 
+  
+    Hdp2-Hive::Configfile<||>{hive_server_host => $hdp2::params::host_address}
+
+    class { 'hdp2-hive::hdfs-directories' : 
+      service_state => $service_state
+    }
+
+    class { 'hdp2-hive::service' :
+      ensure => $service_state
+    }
+  
+    #top level does not need anchors
+    Class['hdp2-hive'] -> Class['hdp2-hive::hdfs-directories'] -> Class['hdp2-hive::service']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+class hdp2-hive::hdfs-directories($service_state)
+{
+  $hive_user = $hdp2-hive::params::hive_user
+ 
+  hdp2-hadoop::hdfs::directory{ '/apps/hive/warehouse':
+    service_state   => $service_state,
+    owner            => $hive_user,
+    mode             => '777',
+    recursive_chmod  => true
+  }  
+  hdp2-hadoop::hdfs::directory{ "/user/${hive_user}":
+    service_state => $service_state,
+    owner         => $hive_user
+  }
+}
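For ad-hoc testing outside the HMC workflow, the server class can be exercised directly with puppet apply, assuming the module path points at hmc/puppet/modules and the hdp2 globals (hive_mysql_host, the hive user, etc.) resolve; this is only a sketch, not part of the managed flow.

  # Sketch: apply the Hive server manifests by hand (modulepath and state are assumptions)
  puppet apply --modulepath=/etc/puppet/modules \
    -e "class { 'hdp2-hive::server': service_state => 'installed_and_configured' }"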
diff --git a/hmc/puppet/modules/hdp2-hive/manifests/service.pp b/hmc/puppet/modules/hdp2-hive/manifests/service.pp
new file mode 100644
index 0000000..0018290
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/manifests/service.pp
@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hive::service(
+  $ensure
+)
+{
+  include hdp2-hive::params
+  
+  $user = $hdp2-hive::params::hive_user
+  $hadoop_home = $hdp2::params::hadoop_home
+  $hive_log_dir = $hdp2-hive::params::hive_log_dir
+  $cmd = "env HADOOP_HOME=${hadoop_home} nohup hive --service metastore > ${hive_log_dir}/hive.out 2> ${hive_log_dir}/hive.log &"
+  $pid_file = "${hdp2-hive::params::hive_pid_dir}/hive.pid" 
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} '"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    #TODO: this needs to be fixed
+    $daemon_cmd = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \" | awk '{print \$2}' | xargs kill >/dev/null 2>&1"
+    $no_op_test = "ps aux | awk '{print \$1,\$2}' | grep \"^${user} \""
+  } else {
+    $daemon_cmd = undef
+  }
+
+  hdp2-hive::service::directory { $hdp2-hive::params::hive_pid_dir : }
+  hdp2-hive::service::directory { $hdp2-hive::params::hive_log_dir : }
+
+  anchor{'hdp2-hive::service::begin':} -> Hdp2-hive::Service::Directory<||> -> anchor{'hdp2-hive::service::end':}
+  
+  if ($daemon_cmd != undef) {
+    if ($ensure == 'running') {
+      hdp2::exec { $daemon_cmd:
+        command => $daemon_cmd,
+        unless  => $no_op_test
+      }
+    } elsif ($ensure == 'stopped') {
+      hdp2::exec { $daemon_cmd:
+        command => $daemon_cmd,
+        onlyif  => $no_op_test
+      }
+    }
+    Hdp2-hive::Service::Directory<||> -> Hdp2::Exec[$daemon_cmd] -> Anchor['hdp2-hive::service::end']
+  }
+}
+
+define hdp2-hive::service::directory()
+{
+  hdp2::directory_recursive_create { $name: 
+    owner => $hdp2-hive::params::hive_user,
+    mode => '0755',
+    service_state => $ensure,
+    force => true
+  }
+}
+
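The service class boils down to a nohup'd Hive metastore process guarded by a crude pid-file liveness test. The commands it generates for ensure => running look roughly like the following; the hive user and log/pid directories follow the params defaults above, while the HADOOP_HOME path is an assumption.

  # Sketch of what hdp2-hive::service executes for ensure => running (HADOOP_HOME is an assumption)
  su - hive -c "env HADOOP_HOME=/usr/lib/hadoop nohup hive --service metastore \
    > /var/log/hive/hive.out 2> /var/log/hive/hive.log &"
  # Liveness check used as the 'unless' guard in the manifest
  ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps $(cat /var/run/hive/hive.pid) >/dev/null 2>&1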
diff --git a/hmc/puppet/modules/hdp2-hive/templates/hive-env.sh.erb b/hmc/puppet/modules/hdp2-hive/templates/hive-env.sh.erb
new file mode 100644
index 0000000..94a9194
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/templates/hive-env.sh.erb
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default the hive shell scripts use a heap size of 256 (MB).  A larger heap size would also be
+# appropriate for the hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp2::params::hadoop_home")%>}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=<%=scope.function_hdp_template_var("hive_conf_dir")%>
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH=<%=scope.function_hdp_template_var("hive_aux_jars_path")%>
diff --git a/hmc/puppet/modules/hdp2-hive/templates/hive-site.xml.erb b/hmc/puppet/modules/hdp2-hive/templates/hive-site.xml.erb
new file mode 100644
index 0000000..cb6e68b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-hive/templates/hive-site.xml.erb
@@ -0,0 +1,128 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>hive.metastore.local</name>
+    <value>false</value>
+    <description>controls whether to connect to remote metastore server or
+    open a new metastore server in Hive Client JVM</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://<%=scope.function_hdp_host("hive_mysql_host")%>/<%=scope.function_hdp_template_var("hive_database_name")%>?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value><%=scope.function_hdp_template_var("hive_metastore_user_name")%></value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value><%=scope.function_hdp_template_var("hive_metastore_user_passwd")%></value>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value><%=scope.function_hdp_template_var("hive_metastore_sasl_enabled")%></value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value><%=scope.function_hdp_template_var("keytab_path")%>/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+     thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value><%=scope.function_hdp_template_var("hive_metastore_principal")%></value>
+    <description>The service principal for the metastore thrift server. The special
+    string _HOST will be replaced automatically with the correct host name.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://<%=scope.function_hdp_host("hive_server_host")%>:<%=scope.function_hdp_template_var("hive_metastore_port")%></value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.semantic.analyzer.factory.impl</name>
+    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
+    <description>controls which SemanticAnalyzerFactory implementation class is used by CLI</description>
+  </property>
+
+  <property>
+    <name>hadoop.clientside.fs.operations</name>
+    <value>true</value>
+    <description>FS operations are owned by client</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>true</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+</configuration>
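Once rendered, this template wires clients to a remote metastore over Thrift and to MySQL for the metastore database. A quick sanity check of a rendered file might look like the sketch below; the conf directory path and the server hostname are assumptions, and 9083 is the hive_metastore_port default from hdp2-hive::params.

  # Sketch: confirm the rendered hive-site.xml points at a reachable metastore (paths/hosts assumed)
  grep -A1 'hive.metastore.uris' /etc/hive/conf/hive-site.xml
  nc -z hive-server.example.com 9083 && echo "metastore thrift port reachable"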
diff --git a/hmc/puppet/modules/hdp2-mysql/files/startMysql.sh b/hmc/puppet/modules/hdp2-mysql/files/startMysql.sh
new file mode 100644
index 0000000..8d48f82
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-mysql/files/startMysql.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldbuser=$1
+mysqldbpasswd=$2
+mysqldbhost=$3
+
+echo "Adding user $mysqldbuser@$mysqldbhost"
+echo "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';" | mysql -u root
+echo "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';" | mysql -u root
+echo "flush privileges;" | mysql -u root
diff --git a/hmc/puppet/modules/hdp2-mysql/manifests/init.pp b/hmc/puppet/modules/hdp2-mysql/manifests/init.pp
new file mode 100644
index 0000000..b0b45e1
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-mysql/manifests/init.pp
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-mysql(){}
+
diff --git a/hmc/puppet/modules/hdp2-mysql/manifests/params.pp b/hmc/puppet/modules/hdp2-mysql/manifests/params.pp
new file mode 100644
index 0000000..a84a580
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-mysql/manifests/params.pp
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-mysql::params() inherits hdp2-hive::params
+{
+   $db_name = "$hdp2-hive::params::hive_database_name"
+   $db_user = $hdp2-hive::params::hive_metastore_user_name
+   $db_pw = $hdp2-hive::params::hive_metastore_user_passwd
+}
diff --git a/hmc/puppet/modules/hdp2-mysql/manifests/server.pp b/hmc/puppet/modules/hdp2-mysql/manifests/server.pp
new file mode 100644
index 0000000..9c880fa
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-mysql/manifests/server.pp
@@ -0,0 +1,71 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-mysql::server(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits  hdp2-mysql::params
+{ 
+  if ($service_state in ['no_op','uninstalled']) {
+   } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+   
+    $db_user = $hdp2-mysql::params::db_user
+    $db_pw = $hdp2-mysql::params::db_pw
+    $db_name = $hdp2-mysql::params::db_name
+    $host = $hdp2::params::hive_mysql_host 
+
+    anchor { 'hdp2-mysql::server::begin':}
+
+    hdp2::package { 'mysql' :
+      size   => 32,
+      require   => Anchor['hdp2-mysql::server::begin']
+    }
+
+    hdp2::exec { 'mysqld start':
+        command => '/etc/init.d/mysqld start',
+        unless  => '/etc/init.d/mysqld status',
+        require => Hdp2::Package['mysql'],
+        notify  => File['/tmp/startMysql.sh']
+    }
+
+    file { '/tmp/startMysql.sh':
+      ensure => present,
+      source => "puppet:///modules/hdp2-mysql/startMysql.sh",
+      mode => '0755',
+      require => Hdp2::Exec['mysqld start'],
+      notify => Exec['/tmp/startMysql.sh']
+    }
+
+    exec { '/tmp/startMysql.sh':
+      command   => "sh /tmp/startMysql.sh ${db_user} ${db_pw} ${host}",
+      tries     => 3,
+      try_sleep => 5,
+      require   => File['/tmp/startMysql.sh'],
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      notify   => Anchor['hdp2-mysql::server::end'],
+      logoutput => "true"
+    }
+
+    anchor { 'hdp2-mysql::server::end':}
+
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_aggregate.php b/hmc/puppet/modules/hdp2-nagios/files/check_aggregate.php
new file mode 100644
index 0000000..946fce9
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_aggregate.php
@@ -0,0 +1,247 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  }
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts;
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      }
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        }
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "RESOURCEMANAGER":
+        $pieces[0] = "YARN";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "SYSTEM":
+      case "HDFS":
+      case "YARN":
+      case "MAPREDUCE":
+      case "HBASE":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+
+/* JSON document format */
+/*
+{
+  "programstatus":{
+    "last_command_check":"1327385743"
+  },
+  "hostcounts":{
+    "up_nodes":"",
+    "down_nodes":""
+  },
+  "hoststatus":[
+    {
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_state":"0",
+      "last_hard_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_check":"1327385564",
+      "current_attempt":"1",
+      "last_hard_state_change":"1327362079",
+      "last_time_up":"1327385574",
+      "last_time_down":"0",
+      "last_time_unreachable":"0",
+      "is_flapping":"0",
+      "last_check":"1327385574",
+      "servicestatus":[
+      ]
+    }
+  ],
+  "servicestatus":[
+    {
+      "service_type":"HDFS",  {HBASE, YARN, MAPREDUCE, HIVE, ZOOKEEPER}
+      "service_description":"HDFS Current Load",
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_attempt":"1",
+      "current_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_hard_state_change":"1327362079",
+      "last_time_ok":"1327385479",
+      "last_time_warning":"0",
+      "last_time_unknown":"0",
+      "last_time_critical":"0",
+      "last_check":"1327385574",
+      "is_flapping":"0"
+    }
+  ]
+}
+*/
+
+?>
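check_aggregate.php is meant to be wired into a Nagios command that parses status.dat and alerts when a percentage of hosts or service checks are in a given state. An example command line follows; the status.dat path, service description, and thresholds are illustrative and not defined by this patch.

  # Sketch: warn at 10%, critical at 30% of matching service checks in state >= 2 (values are placeholders)
  php ./check_aggregate.php -f /var/nagios/status.dat -t service \
      -n "DATANODE::Process down" -s 2 -w 10% -c 30%
  # Host variant: percentage of hosts whose current_state equals 1 (down)
  php ./check_aggregate.php -f /var/nagios/status.dat -t host -s 1 -w 10% -c 30%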
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_cpu.pl b/hmc/puppet/modules/hdp2-nagios/files/check_cpu.pl
new file mode 100644
index 0000000..a5680f7
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_cpu.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+use strict;
+use Net::SNMP;
+use Getopt::Long;
+
+# Variable
+my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
+my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
+my $o_host = 	undef;
+my $o_community = undef;
+my $o_warn=	undef;
+my $o_crit=	undef;
+my $o_timeout = 15;
+my $o_port = 161;
+
+sub Usage {
+    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
+}
+
+Getopt::Long::Configure ("bundling");
+GetOptions(
+  'H:s'   => \$o_host,	
+  'C:s'   => \$o_community,	
+  'c:s'   => \$o_crit,        
+  'w:s'   => \$o_warn
+          );
+if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
+  Usage();
+  exit 3;
+}
+$o_warn =~ s/\%//g; 
+$o_crit =~ s/\%//g;
+alarm ($o_timeout);
+$SIG{'ALRM'} = sub {
+ print "Unable to contact host: $o_host\n";
+ exit 3;
+};
+
+# Connect to host
+my ($session,$error);
+($session, $error) = Net::SNMP->session(
+		-hostname  => $o_host,
+		-community => $o_community,
+		-port      => $o_port,
+		-timeout   => $o_timeout
+	  );
+if (!defined($session)) {
+   printf("Error opening session: %s.\n", $error);
+   exit 3;
+}
+
+my $exit_val=undef;
+my $resultat =  (Net::SNMP->VERSION < 4) ?
+	  $session->get_table($base_proc)
+	: $session->get_table(Baseoid => $base_proc);
+
+if (!defined($resultat)) {
+   printf("ERROR: Description table : %s.\n", $session->error);
+   $session->close;
+   exit 3;
+}
+
+$session->close;
+
+my ($cpu_used,$ncpu)=(0,0);
+foreach my $key ( keys %$resultat) {
+  if ($key =~ /$proc_load/) {
+    $cpu_used += $$resultat{$key};
+    $ncpu++;
+  }
+}
+
+if ($ncpu==0) {
+  print "Can't find CPU usage information : UNKNOWN\n";
+  exit 3;
+}
+
+$cpu_used /= $ncpu;
+
+print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
+printf(" %.1f%%",$cpu_used);
+$exit_val=0;
+
+if ($cpu_used > $o_crit) {
+ print " > $o_crit% : CRITICAL\n";
+ $exit_val=2;
+} else {
+  if ($cpu_used > $o_warn) {
+   print " > $o_warn% : WARNING\n";
+   $exit_val=1;
+  }
+}
+print " < $o_warn% : OK\n" if ($exit_val eq 0);
+exit $exit_val;
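This check polls the host's hrProcessorLoad table over SNMP and averages the load across processors. An example invocation, with a placeholder host, community string, and thresholds:

  # Sketch: warn above 80% average CPU load, critical above 95% (all values are placeholders)
  perl ./check_cpu.pl -H dn01.example.com -C public -w 80% -c 95%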
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_datanode_storage.php b/hmc/puppet/modules/hdp2-nagios/files/check_datanode_storage.php
new file mode 100644
index 0000000..079d904
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_datanode_storage.php
@@ -0,0 +1,63 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the DataNode's JMX servlet, fetches the JMX JSON document,
+ * and checks the storage capacity remaining on the local datanode storage.
+ */
+
+  $options = getopt ("h:p:w:c:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-DS-*");
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop apps */
+  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
+  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
+
+  $out_msg = "Capacity:[" . $cap_total . 
+             "], Remaining Capacity:[" . $cap_remain . 
+             "], percent_full:[" . $percent_full  . "]";
+  
+  if ($percent_full > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent_full > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
+  }
+?>
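This check and the two HDFS checks that follow all scrape the Hadoop JMX servlet and compare a derived percentage against the -w/-c thresholds. The raw bean it parses can be inspected directly; the host, thresholds, and the 50075 DataNode HTTP port below are placeholders, not values fixed by this patch.

  # Sketch: inspect the JMX bean the plugin reads (placeholder host/port)
  curl -s 'http://dn01.example.com:50075/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-DS-*'
  # Placeholder plugin invocation: warn at 80% full, critical at 90%
  php ./check_datanode_storage.php -h dn01.example.com -p 50075 -w 80% -c 90%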
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_hdfs_blocks.php b/hmc/puppet/modules/hdp2-nagios/files/check_hdfs_blocks.php
new file mode 100644
index 0000000..bc34fe8
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_hdfs_blocks.php
@@ -0,0 +1,72 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the NameNode's JMX servlet, fetches the JMX JSON document,
+ * and checks whether the corrupt or missing block percentage exceeds the threshold.
+ * Usage: check_jmx -H hostaddress -p port -w 1% -c 1%
+ */
+
+  $options = getopt ("h:p:w:c:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemMetrics");
+  $json_array = json_decode($json_string, true);
+  $m_percent = 0;
+  $c_percent = 0;
+  $object = $json_array['beans'][0];
+  $missing_blocks = $object['MissingBlocks'];
+  $corrupt_blocks = $object['CorruptBlocks'];
+  $total_blocks = $object['BlocksTotal'];
+  if($total_blocks == 0) {
+    $m_percent = 0;
+    $c_percent = 0;
+  } else {
+    $m_percent = ($missing_blocks/$total_blocks)*100;
+    $c_percent = ($corrupt_blocks/$total_blocks)*100;
+  }
+  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
+             ">, missing_blocks:<" . $missing_blocks .
+             ">, total_blocks:<" . $total_blocks . ">";
+
+  if ($m_percent > $crit || $c_percent > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($m_percent > $warn || $c_percent > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
+  }
+?>
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_hdfs_capacity.php b/hmc/puppet/modules/hdp2-nagios/files/check_hdfs_capacity.php
new file mode 100644
index 0000000..31ed463
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_hdfs_capacity.php
@@ -0,0 +1,68 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the NameNode's JMX servlet, fetches the JMX JSON document,
+ * and checks whether the % of HDFS capacity used is >= the warning and critical limits.
+ * Usage: check_jmx -H hostaddress -p port -w 1 -c 1
+ */
+
+  $options = getopt ("h:p:w:c:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState");
+  $json_array = json_decode($json_string, true);
+  $percent = 0;
+  $object = $json_array['beans'][0];
+  $CapacityUsed = $object['CapacityUsed'];
+  $CapacityRemaining = $object['CapacityRemaining'];
+  $CapacityTotal = $CapacityUsed + $CapacityRemaining;
+  if($CapacityTotal == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($CapacityUsed/$CapacityTotal)*100;
+  }
+  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
+             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
+
+  if ($percent >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%>\n";
+  }
+?>
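check_hdfs_blocks.php and check_hdfs_capacity.php follow the same JMX pattern against the NameNode's HTTP port; typical invocations would be as below, where the host, port, and thresholds are placeholders.

  # Sketch: placeholder invocations against the NameNode HTTP port
  php ./check_hdfs_blocks.php   -h nn.example.com -p 50070 -w 1%  -c 1%
  php ./check_hdfs_capacity.php -h nn.example.com -p 50070 -w 80% -c 90%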
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_hive_metastore_status.sh b/hmc/puppet/modules/hdp2-nagios/files/check_hive_metastore_status.sh
new file mode 100644
index 0000000..b784327
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_hive_metastore_status.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#The uri is of the form thrift://<hostname>:<port>
+HOST=$1
+PORT=$2
+HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
+out=`hcat $HCAT_URL -e "show databases" 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing hive-metaserver status [$out]";
+  exit 2;
+fi
+echo "OK: Hive metaserver status OK";
+exit 0;
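The metastore check simply runs "show databases" through the hcat client against the given Thrift URI and flags any non-zero exit. Example invocation, where the host is a placeholder and 9083 matches the hive_metastore_port default above; the hcat client must be installed on the Nagios host.

  # Sketch: placeholder invocation (requires the hcat client locally)
  sh ./check_hive_metastore_status.sh hive-server.example.com 9083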
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_name_dir_status.php b/hmc/puppet/modules/hdp2-nagios/files/check_name_dir_status.php
new file mode 100644
index 0000000..d2bf843
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_name_dir_status.php
@@ -0,0 +1,59 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the NameNode's JMX servlet, fetches the JMX JSON document,
+ * and checks NameDirStatuses to find any offline (failed) name directories.
+ * Usage: check_jmx -H hostaddress -p port
+ */
+
+  $options = getopt ("h:p:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if ($object['NameDirStatuses'] == "") {
+    echo "UNKNOWN: Namenode directory status not available via http://<nn_host>:port/jmx url" . "\n";
+    exit(3);
+  }
+  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
+  $failed_dir_count = count($NameDirStatuses['failed']);
+  $out_msg = "CRITICAL: Offline Namenode directories: ";
+  if ($failed_dir_count > 0) {
+    foreach ($NameDirStatuses['failed'] as $key => $value) {
+      $out_msg = $out_msg . $key . ":" . $value . ", ";
+    }
+    echo $out_msg . "\n";
+    exit (2);
+  }
+  echo "OK: All Namenode directories are active" . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port\n";
+  }
+?>
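As with the other JMX checks, this one takes the NameNode host and HTTP port; both values below are placeholders.

  # Sketch: placeholder invocation against the NameNode HTTP port
  php ./check_name_dir_status.php -h nn.example.com -p 50070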
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_oozie_status.sh b/hmc/puppet/modules/hdp2-nagios/files/check_oozie_status.sh
new file mode 100644
index 0000000..660085c
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_oozie_status.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# OOZIE_URL is of the form http://<hostname>:<port>/oozie
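+# Example (illustrative, placeholder host and JAVA_HOME): ./check_oozie_status.sh oozie.example.com 11000 /path/to/java_home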
+HOST=$1
+PORT=$2
+JAVA_HOME=$3
+OOZIE_URL="http://$HOST:$PORT/oozie"
+export JAVA_HOME=$JAVA_HOME
+out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing oozie server status [$out]";
+  exit 2;
+fi
+echo "OK: Oozie server status [$out]";
+exit 0;
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_puppet_agent_status.php b/hmc/puppet/modules/hdp2-nagios/files/check_puppet_agent_status.php
new file mode 100644
index 0000000..b9a3565
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_puppet_agent_status.php
@@ -0,0 +1,59 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* This plugin checks whether the puppet agent on the given host is alive */
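+ /* Example (illustrative, placeholder host): php check_puppet_agent_status.php -h agent.example.com */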
+
+  $options = getopt ("h:");
+  if (!array_key_exists('h', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+
+  /* Give puppet kick --ping to check if agent is working */
+  $out_arr = array();
+  $cmd = "puppet kick -f --host $host --ping 2>/dev/null";
+  exec ($cmd, $out_arr, $err);
+  if ($err == 0 && check_error($out_arr, "status is success", 0) == 0) {
+    // success
+    echo "OK: Puppet agent is active on [$host]" . "\n";
+    exit(0);
+  } else {
+    // Fail
+    echo "WARN: Puppet agent is down on [$host]" . "\n";
+    exit(1);
+  }
+
+  /* check error function */
+  function check_error ($output, $pattern, $ret) {
+    $ret1=($ret+1)%2;
+    for ($i=0; $i<count($output); $i++) {
+      if (preg_match ("/$pattern/", $output[$i])) {
+        return $ret;
+      }
+    }
+    return $ret1;
+  }
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host>\n";
+  }
+?>
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_rpcq_latency.php b/hmc/puppet/modules/hdp2-nagios/files/check_rpcq_latency.php
new file mode 100644
index 0000000..17f9817
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_rpcq_latency.php
@@ -0,0 +1,67 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin calls the master node, fetches the JMX JSON document and checks
+ * the RPC queue wait time (RpcQueueTime_avg_time).
+ * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
+ * Warning and Critical values are in seconds
+ * ServiceName = JobTracker, ResourceManager or NameNode
+ */
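+/* Example (illustrative, placeholder host; thresholds as wired up in hadoop-services.cfg.erb):
+ *   php check_rpcq_latency.php -h namenode.example.com -p 50070 -n NameNode -w 3000 -c 5000
+ */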
+
+  $options = getopt ("h:p:w:c:n:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $master=$options['n'];
+  $warn=$options['w'];
+  $crit=$options['c'];
+
+  /* Get the json document */
+  $json_string = file_get_contents("http://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*");
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+
+  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
+  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
+
+  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
+             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
+             "> Secs";
+
+  if ($RpcQueueTime_avg_time >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($RpcQueueTime_avg_time >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/ResourceManager/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
+  }
+?>
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_templeton_status.sh b/hmc/puppet/modules/hdp2-nagios/files/check_templeton_status.sh
new file mode 100644
index 0000000..de12f65
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_templeton_status.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# Expected curl output looks like: out='{"status":"ok","version":"v1"}<status_code:200>'
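+# Example (illustrative, placeholder host): ./check_templeton_status.sh templeton.example.com 50111 v1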
+HOST=$1
+PORT=$2
+VERSION=$3
+regex="^.*\"status\":\"ok\".*<status_code:200>$"
+out=`curl http://$HOST:$PORT/templeton/$VERSION/status -w '<status_code:%{http_code}>' 2>&1`
+if [[ $out =~ $regex ]]; then 
+  echo "OK: Templeton server status [$out]";
+  exit 0;
+fi
+echo "CRITICAL: Error accessing Templeton server, status [$out]";
+exit 2;
diff --git a/hmc/puppet/modules/hdp2-nagios/files/check_webui.sh b/hmc/puppet/modules/hdp2-nagios/files/check_webui.sh
new file mode 100644
index 0000000..bfad419
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/files/check_webui.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
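+# checkurl <url>: fetch the URL with curl and echo curl's exit status (0 when the fetch succeeds)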
+checkurl () {
+  url=$1
+  curl $url -o /dev/null
+  echo $?
+}
+
+service=$1
+host=$2
+
+if [[ -z "$service" || -z "$host" ]]; then
+  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
+  exit 3;
+fi
+
+case "$service" in
+
+resourcemanager) 
+    rm_weburl="http://$host:8088"
+    if [[ `checkurl "$rm_weburl"` -ne 0 ]]; then 
+      echo "WARNING: YARN ResourceManager web UI not accessible : $rm_weburl";
+      exit 1;
+    fi
+    ;;
+namenode)
+    nnweburl="http://$host:50070"
+    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
+      echo "WARNING: NameNode web UI not accessible : $nnweburl";
+      exit 1;
+    fi
+    ;;
+jobhistoryserver2)
+    mr_jhs_weburl="http://$host:19888/"
+    if [[ `checkurl "$mr_jhs_weburl"` -ne 0 ]]; then 
+      echo "WARNING: MapReduce JobHistoryServer web UI not accessible : $mr_jhs_weburl";
+      exit 1;
+    fi
+    ;;
+hbase)
+    hbaseweburl="http://$host:60010/master-status"
+    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
+      echo "WARNING: HBase Master web UI not accessible : $hbaseweburl"; 
+      exit 1;
+    fi
+    ;;
+*) echo "UNKNOWN: Invalid service name [$service], valid options [resourcemanager|jobhistoryserver2|hbase|namenode]"
+   exit 3
+   ;;
+esac
+
+echo "OK: Successfully accessed $service Web UI"
+exit 0;
diff --git a/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
new file mode 100644
index 0000000..658c2ae
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_all_hosts.rb
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
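+# hdp_nagios_all_hosts: returns the de-duplicated list of every host referenced by an
+# entry in the hostgroup_defs template variable (see hdp2-nagios::params).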
+module Puppet::Parser::Functions
+  newfunction(:hdp_nagios_all_hosts, :type => :rvalue) do 
+    hg_defs = function_hdp_template_var("hostgroup_defs")
+    ret = Array.new
+    if hg_defs.kind_of?(Hash)
+      hg_defs.each_value do |info|
+        h = function_hdp_host(info['host_member_info'])
+        unless function_hdp_is_empty(h)
+          ret += [h].flatten 
+        end
+      end
+    end
+    ret.uniq
+  end
+end
diff --git a/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
new file mode 100644
index 0000000..3a81d62
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_compute_target_hosts.rb
@@ -0,0 +1,42 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
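+# hdp_nagios_compute_target_hosts(monitored_hosts, component_name_mapping):
+# monitored_hosts is an array of {hostname => [component, ...]} entries; the mapping ties
+# component names to Nagios host variables. Returns a hash of host_var => hostname, or
+# host_var => [hostnames] when the mapping entry declares type 'array'.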
+module Puppet::Parser::Functions
+  newfunction(:hdp_nagios_compute_target_hosts, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    monitored_hosts = args[0]
+    component_name_mapping = args[1]
+    ret = Hash.new
+    monitored_hosts.each do |host_info|
+      hostname = host_info.keys.first
+      cmps = host_info.values.first
+      cmps.each do |cmp|
+        next unless host_var_info = component_name_mapping[cmp]
+        host_var = host_var_info['host_var']
+        if host_var_info['type'] == 'array'
+          (ret[host_var] ||= Array.new) << hostname
+        elsif host_var_info['type'] == 'scalar'
+          ret[host_var] = hostname
+        end
+      end
+    end
+    ret
+  end
+end
diff --git a/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
new file mode 100644
index 0000000..58fd0c2
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_members_exist.rb
@@ -0,0 +1,34 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
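+# hdp_nagios_members_exist(host_type): true when the hostgroup_defs entry for host_type
+# resolves to at least one member host.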
+module Puppet::Parser::Functions
+  newfunction(:hdp_nagios_members_exist, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    host_type = args[0]
+    hg_defs = function_hdp_template_var("hostgroup_defs")
+    if  hg_defs.kind_of?(Hash)
+      #TODO: see if needed    Puppet::Parser::Functions.autoloader.loadall
+      member_info = (hg_defs[host_type]||{})['host_member_info']
+      member_info and not function_hdp_is_empty(function_hdp_host(member_info))
+    else
+      nil
+    end
+  end
+end
diff --git a/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
new file mode 100644
index 0000000..5d777af
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/lib/puppet/parser/functions/hdp_nagios_target_hosts.rb
@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
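+# hdp_nagios_target_hosts(host_types): resolves each host variable name in host_types via
+# hdp_host and returns the flattened list of hosts, skipping empty results.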
+module Puppet::Parser::Functions
+  newfunction(:hdp_nagios_target_hosts, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    host_types = args[0]
+#TODO: see if needed       Puppet::Parser::Functions.autoloader.loadall
+    host_types.map{|t|function_hdp_host(t)}.map{|h|function_hdp_is_empty(h) ? [] : [h].flatten}.flatten
+  end
+end
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/init.pp b/hmc/puppet/modules/hdp2-nagios/manifests/init.pp
new file mode 100644
index 0000000..3327159
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/init.pp
@@ -0,0 +1,21 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios(){}
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/nagios/service_check.pp b/hmc/puppet/modules/hdp2-nagios/manifests/nagios/service_check.pp
new file mode 100644
index 0000000..f61b058
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/nagios/service_check.pp
@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios::nagios::service_check() 
+{
+  
+  anchor { 'hdp2-nagios::nagios::service_check::begin':}
+
+  exec { 'nagios':
+    command   => "/etc/init.d/nagios status | grep 'is running'",
+    tries     => 3,
+    try_sleep => 5,
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    before      => Anchor['hdp2-nagios::nagios::service_check::end'],
+    logoutput => "true"
+  }
+
+  anchor{ 'hdp2-nagios::nagios::service_check::end':}
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/params.pp b/hmc/puppet/modules/hdp2-nagios/manifests/params.pp
new file mode 100644
index 0000000..180db32
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/params.pp
@@ -0,0 +1,59 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios::params() inherits hdp2::params
+{   
+ 
+  $nagios_user = "nagios"
+  $nagios_group = "nagios"
+  
+  $conf_dir = hdp_default("nagios_conf_dir","/etc/nagios")
+
+  $plugins_dir = "/usr/lib64/nagios/plugins"
+
+  $nagios_obj_dir = hdp_default("nagios_obj_dir","/etc/nagios/objects")
+  $nagios_host_cfg = hdp_default("nagios_host_cfg","${nagios_obj_dir}/hadoop-hosts.cfg")
+  $nagios_hostgroup_cfg = hdp_default("nagios_hostgroup_cfg","${nagios_obj_dir}/hadoop-hostgroups.cfg")
+  $nagios_servicegroup_cfg = hdp_default("nagios_servicegroup_cfg","${nagios_obj_dir}/hadoop-servicegroups.cfg")
+  $nagios_service_cfg = hdp_default("nagios_service_cfg","${nagios_obj_dir}/hadoop-services.cfg")
+  $nagios_command_cfg = hdp_default("nagios_command_cfg","${nagios_obj_dir}/hadoop-commands.cfg")
+  
+  $nagios_web_login = hdp_default("nagios_web_login","nagiosadmin")
+  $nagios_web_password = hdp_default("nagios_web_password","admin")
+  
+  $dfs_data_dir = $hdp2::params::dfs_data_dir
+   
+  $nagios_contact = hdp_default("nagios/nagios-contacts/nagios_contact","monitor\@monitor.com")
+
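+  # Maps each Nagios hostgroup to the hdp2 host variable that lists its member hosts;
+  # consumed by the hdp_nagios_* parser functions and the hadoop-hostgroups.cfg.erb template.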
+  $hostgroup_defs = {
+    namenode => {host_member_info => 'namenode_host'},
+    snamenode => {host_member_info => 'snamenode_host'},
+    slaves => {host_member_info => 'slave_hosts'},
+    nagios-server => {host_member_info => 'nagios_server_host'},
+    resourcemanager => {host_member_info => 'yarn_rm_host'},
+    ganglia-server => {host_member_info => 'ganglia_server_host'},
+    zookeeper-servers => {host_member_info => 'zookeeper_hosts'},
+    hbasemaster => {host_member_info => 'hbase_master_host'},
+    hiveserver => {host_member_info => 'hive_server_host'},
+    region-servers => {host_member_info => 'hbase_rs_hosts'},
+    oozie-server => {host_member_info => 'oozie_server'},
+    templeton-server => {host_member_info => 'templeton_server_host'}
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/server.pp b/hmc/puppet/modules/hdp2-nagios/manifests/server.pp
new file mode 100644
index 0000000..b4243a7
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/server.pp
@@ -0,0 +1,114 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios::server(
+  $service_state = $hdp2::params::cluster_service_state
+) inherits hdp2-nagios::params
+{
+
+  $nagios_config_dir = $hdp2-nagios::params::conf_dir
+  $plugins_dir = $hdp2-nagios::params::plugins_dir
+  $nagios_obj_dir = $hdp2-nagios::params::nagios_obj_dir
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['uninstalled']) {
+    class { 'hdp2-nagios::server::packages' : 
+      service_state => uninstalled
+    }
+
+    hdp2::exec { "rm -f /var/nagios/rw/nagios.cmd" :
+      command => "rm -f /var/nagios/rw/nagios.cmd",
+      unless => "test ! -e  /var/nagios/rw/nagios.cmd"
+    }
+
+    hdp2::exec { "rm -rf /tmp/hadoop-nagios" :
+      command => "rm -rf /tmp/hadoop-nagios",
+      unless => "test ! -e  /tmp/hadoop-nagios"
+    }
+
+    hdp2::directory { $nagios_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2::directory { $plugins_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2::directory { $nagios_obj_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+     Class['hdp2-nagios::server::packages'] -> Exec['rm -f /var/nagios/rw/nagios.cmd'] -> Exec['rm -rf /tmp/hadoop-nagios'] -> Hdp2::Directory[$nagios_config_dir] -> Hdp2::Directory[$plugins_dir] -> Hdp2::Directory[$nagios_obj_dir]
+
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    class { 'hdp2-nagios::server::packages' : }
+
+    hdp2::directory { $nagios_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2::directory { $plugins_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2::directory { $nagios_obj_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+
+    class { 'hdp2-nagios::server::config': 
+      notify => Class['hdp2-nagios::server::services']
+    }
+
+    class { 'hdp2-nagios::server::web_permissions': }
+
+    class { 'hdp2-nagios::server::services': ensure => $service_state}
+
+    Class['hdp2-nagios::server::packages'] -> Hdp2::Directory[$nagios_config_dir] -> Hdp2::Directory[$plugins_dir] -> Hdp2::Directory[$nagios_obj_dir] -> Class['hdp2-nagios::server::config'] -> 
+    Class['hdp2-nagios::server::web_permissions'] -> Class['hdp2-nagios::server::services']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+class hdp2-nagios::server::web_permissions()
+{
+  $web_login = $hdp2-nagios::params::nagios_web_login
+  $cmd = "htpasswd -c -b  /etc/nagios/htpasswd.users ${web_login} ${hdp2-nagios::params::nagios_web_password}"
+  $test = "grep ${web_login} /etc/nagios/htpasswd.users"
+  hdp2::exec { $cmd :
+    command => $cmd,
+    unless => $test
+  }
+}
+
+class hdp2-nagios::server::services($ensure)
+{
+  if ($ensure in ['running','stopped']) {
+    service { 'nagios': ensure => $ensure}
+    anchor{'hdp2-nagios::server::services::begin':} ->  Service['nagios'] ->  anchor{'hdp2-nagios::server::services::end':}
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/server/config.pp b/hmc/puppet/modules/hdp2-nagios/manifests/server/config.pp
new file mode 100644
index 0000000..15bfa64
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/server/config.pp
@@ -0,0 +1,75 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios::server::config()
+{
+
+  $host_cfg = $hdp2-nagios::params::nagios_host_cfg
+  
+  hdp2-nagios::server::configfile { 'nagios.cfg': conf_dir => $hdp2-nagios::params::conf_dir }
+  hdp2-nagios::server::configfile { 'hadoop-hosts.cfg': }
+  hdp2-nagios::server::configfile { 'hadoop-hostgroups.cfg': }
+  hdp2-nagios::server::configfile { 'hadoop-servicegroups.cfg': }
+  hdp2-nagios::server::configfile { 'hadoop-services.cfg': }
+  hdp2-nagios::server::configfile { 'hadoop-commands.cfg': }
+  hdp2-nagios::server::configfile { 'contacts.cfg': }
+
+  hdp2-nagios::server::check { 'check_cpu.pl': }
+  hdp2-nagios::server::check { 'check_datanode_storage.php': }
+  hdp2-nagios::server::check { 'check_aggregate.php': }
+  hdp2-nagios::server::check { 'check_hdfs_blocks.php': }
+  hdp2-nagios::server::check { 'check_hdfs_capacity.php': }
+  hdp2-nagios::server::check { 'check_rpcq_latency.php': }
+  hdp2-nagios::server::check { 'check_webui.sh': }
+  hdp2-nagios::server::check { 'check_name_dir_status.php': }
+  hdp2-nagios::server::check { 'check_puppet_agent_status.php': }
+  hdp2-nagios::server::check { 'check_oozie_status.sh': }
+  hdp2-nagios::server::check { 'check_templeton_status.sh': }
+  hdp2-nagios::server::check { 'check_hive_metastore_status.sh': }
+
+  anchor{'hdp2-nagios::server::config::begin':} -> Hdp2-nagios::Server::Configfile<||> -> anchor{'hdp2-nagios::server::config::end':}
+  Anchor['hdp2-nagios::server::config::begin'] -> Hdp2-nagios::Server::Check<||> -> Anchor['hdp2-nagios::server::config::end']
+}
+
+
+###config file helper
+define hdp2-nagios::server::configfile(
+  $owner = $hdp2-nagios::params::nagios_user,
+  $conf_dir = $hdp2-nagios::params::nagios_obj_dir,
+  $mode = undef
+) 
+{
+  
+  hdp2::configfile { "${conf_dir}/${name}":
+    component      => 'nagios',
+    owner          => $owner,
+    mode           => $mode
+  }
+
+  
+}
+
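+# Installs one of the Nagios plugin scripts shipped under this module's files/ directory
+# into the plugins directory.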
+define hdp2-nagios::server::check()
+{
+  file { "${hdp2-nagios::params::plugins_dir}/${name}":
+    source => "puppet:///modules/hdp2-nagios/${name}", 
+    mode => '0755'
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/server/packages.pp b/hmc/puppet/modules/hdp2-nagios/manifests/server/packages.pp
new file mode 100644
index 0000000..2815d0a
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/server/packages.pp
@@ -0,0 +1,61 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios::server::packages(
+  $service_state = $hdp2::params::cluster_service_state
+)
+{
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['uninstalled']) {
+      hdp2-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons']:
+      ensure => 'uninstalled'
+    }
+  } elsif ($service_state in ['running','stopped','installed_and_configured']) {
+    case $hdp2::params::hdp_os_type {
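+      # On centos6/rhel6 the bundled PHP already provides JSON support, so the
+      # nagios-php-pecl-json package is only installed on other platforms.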
+      centos6, rhel6: {
+        hdp2-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons']:
+          ensure => 'present'
+        }
+      }
+      default: {
+        hdp2-nagios::server::package { ['nagios-server','nagios-fping','nagios-plugins','nagios-addons','nagios-php-pecl-json']:
+          ensure => 'present'
+        }
+      }
+    }
+  }
+
+  Hdp2-nagios::Server::Package['nagios-plugins'] -> Hdp2::Package['nagios-addons'] #other order produces package conflict
+
+  anchor{'hdp2-nagios::server::packages::begin':} -> Hdp2-nagios::Server::Package<||> -> anchor{'hdp2-nagios::server::packages::end':}
+  Anchor['hdp2-nagios::server::packages::begin'] -> Hdp2::Package['nagios-addons'] -> Anchor['hdp2-nagios::server::packages::end']
+  Hdp2-nagios::Server::Package['nagios-fping'] -> Hdp2-nagios::Server::Package['nagios-plugins']
+}
+
+
+define hdp2-nagios::server::package(
+  $ensure = present
+)
+{
+  hdp2::package { $name: 
+    ensure      => $ensure,
+    java_needed => false
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/manifests/target.pp b/hmc/puppet/modules/hdp2-nagios/manifests/target.pp
new file mode 100644
index 0000000..91aeea9
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/manifests/target.pp
@@ -0,0 +1,21 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-nagios::target(){}
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/contacts.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/contacts.cfg.erb
new file mode 100644
index 0000000..e967457
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/contacts.cfg.erb
@@ -0,0 +1,76 @@
+###############################################################################
+# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
+#
+# Last Modified: 05-31-2007
+#
+# NOTES: This config file provides you with some example contact and contact
+#        group definitions that you can reference in host and service
+#        definitions.
+#       
+#        You don't need to keep these definitions in a separate file from your
+#        other object definitions.  This has been done just to make things
+#        easier to understand.
+#
+###############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+
+###############################################################################
+###############################################################################
+#
+# CONTACTS
+#
+###############################################################################
+###############################################################################
+
+# Just one contact defined by default - the Nagios admin (that's you)
+# This contact definition inherits a lot of default values from the 'generic-contact' 
+# template which is defined elsewhere.
+
+define contact{
+        contact_name    <%=scope.function_hdp_template_var("nagios_web_login")%>    ; Short name of user
+        use             generic-contact    ; Inherit default values from generic-contact template (defined above)
+        alias           Nagios Admin       ; Full name of user
+
+        email           <%=scope.function_hdp_template_var("nagios_contact")%>      ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
+        }
+
+
+
+###############################################################################
+###############################################################################
+#
+# CONTACT GROUPS
+#
+###############################################################################
+###############################################################################
+
+# We only have one contact in this simple configuration file, so there is
+# no need to create more than one contact group.
+
+define contactgroup {
+        contactgroup_name       admins
+        alias                   Nagios Administrators
+        members                 <%=scope.function_hdp_template_var("nagios_web_login")%>
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/hadoop-commands.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-commands.cfg.erb
new file mode 100644
index 0000000..aee0114
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-commands.cfg.erb
@@ -0,0 +1,88 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# 'check_cpu' check remote cpu load
+define command {
+        command_name    check_cpu
+        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
+       }
+
+# Check data node storage full 
+define command {
+        command_name    check_datanode_storage
+        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_hdfs_blocks
+        command_line    php $USER1$/check_hdfs_blocks.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_hdfs_capacity
+        command_line    php $USER1$/check_hdfs_capacity.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_aggregate
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_rpcq_latency
+        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$
+       }
+
+define command{
+        command_name    check_nagios
+        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
+       }
+
+define command{
+        command_name    check_webui
+        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$
+       }
+
+define command{
+        command_name    check_name_dir_status
+        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$
+       }
+
+define command{
+        command_name    check_puppet_agent_status
+        command_line    php $USER1$/check_puppet_agent_status.php -h $HOSTADDRESS$
+       }
+  
+define command{
+        command_name    check_oozie_status
+        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$
+       }
+
+define command{
+        command_name    check_templeton_status
+        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ 
+       }
+
+define command{
+        command_name    check_hive_metastore_status
+        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$
+       }
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/hadoop-hostgroups.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-hostgroups.cfg.erb
new file mode 100644
index 0000000..9bac137
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-hostgroups.cfg.erb
@@ -0,0 +1,20 @@
+<% all_hosts = Array.new -%>
+<%scope.function_hdp_template_var("hostgroup_defs").each do |name,info|-%>
+<%members = scope.function_hdp_host(info['host_member_info'])-%>
+<%unless scope.function_hdp_is_empty(members) -%>
+<% all_hosts += [members].flatten-%>
+define hostgroup {
+        hostgroup_name  <%=name%>
+        alias           <%=name%>
+        members         <%=[members].flatten.join(',')%>
+}
+
+<%end-%>
+<%end%>
+<%unless all_hosts.empty?-%>
+define hostgroup {
+        hostgroup_name  all-servers
+        alias           All Servers
+        members         <%=all_hosts.uniq.join(',')%>
+}
+<%end%>
\ No newline at end of file
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/hadoop-hosts.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-hosts.cfg.erb
new file mode 100644
index 0000000..4e97548
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-hosts.cfg.erb
@@ -0,0 +1,16 @@
+<%scope.function_hdp_nagios_all_hosts().each do |host|-%>
+define host {
+        alias        <%=host%>
+        host_name    <%=host%>
+        use          linux-server
+        address      <%=host%>
+        check_interval         0.25
+        retry_interval         0.25
+        max_check_attempts     4
+        notifications_enabled     1
+        first_notification_delay  0     # Send notification soon after change in the hard state
+        notification_interval     0     # Send the notification once
+        notification_options      d,u,r
+}
+
+<%end%>
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/hadoop-servicegroups.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-servicegroups.cfg.erb
new file mode 100644
index 0000000..9c2388e
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-servicegroups.cfg.erb
@@ -0,0 +1,44 @@
+define servicegroup {
+  servicegroup_name  HDFS
+  alias  HDFS Checks
+}
+define servicegroup {
+  servicegroup_name  YARN
+  alias  YARN Checks
+}
+define servicegroup {
+  servicegroup_name  MAPREDUCE
+  alias  MAPREDUCE Checks
+}
+define servicegroup {
+  servicegroup_name  HBASE
+  alias  HBASE Checks
+}
+define servicegroup {
+  servicegroup_name  OOZIE
+  alias  OOZIE Checks
+}
+define servicegroup {
+  servicegroup_name  TEMPLETON
+  alias  TEMPLETON Checks
+}
+define servicegroup {
+  servicegroup_name  NAGIOS
+  alias  NAGIOS Checks
+}
+define servicegroup {
+  servicegroup_name  GANGLIA
+  alias  GANGLIA Checks
+}
+define servicegroup {
+  servicegroup_name  HIVE-METASTORE
+  alias  HIVE-METASTORE Checks
+}
+define servicegroup {
+  servicegroup_name  ZOOKEEPER
+  alias  ZOOKEEPER Checks
+}
+define servicegroup {
+  servicegroup_name  PUPPET
+  alias  Puppet Agent Checks
+}
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/hadoop-services.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-services.cfg.erb
new file mode 100644
index 0000000..89993da
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/hadoop-services.cfg.erb
@@ -0,0 +1,467 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# NAGIOS SERVER Check (status log update)
+<%if scope.function_hdp_nagios_members_exist('nagios-server')-%>
+define service {
+        name                            hadoop-service
+        use                             generic-service
+        notification_options            w,u,c
+        first_notification_delay        0
+        notification_interval           0     # Send the notification once
+}
+
+define service {        
+        hostgroup_name          all-servers
+        use                     hadoop-service
+        service_description     PUPPET::Puppet agent down
+        servicegroups           PUPPET
+        check_command           check_tcp!8139!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {        
+        hostgroup_name          nagios-server        
+        use                     hadoop-service
+        service_description     NAGIOS::Nagios status log staleness
+        servicegroups           NAGIOS
+        check_command           check_nagios!10!/var/nagios/status.dat!/usr/bin/nagios
+        normal_check_interval   5
+        retry_check_interval    0.5
+        max_check_attempts      2
+}
+
+# NAGIOS SERVER HDFS Checks
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes storage full
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::Storage full"!10%!30%
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes down
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::Process down"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+# NAGIOS SERVER YARN Checks
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     YARN::Percent NodeManagers down
+        servicegroups           YARN
+        check_command           check_aggregate!"NODEMANAGER::Process down"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+# NAGIOS SERVER ZOOKEEPER Checks
+<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     ZOOKEEPER::Percent zookeeper servers down
+        servicegroups           ZOOKEEPER
+        check_command           check_aggregate!"ZKSERVERS::ZKSERVERS Process down"!35%!70%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+<%end-%>
+
+# NAGIOS SERVER HBASE Checks
+<%if scope.function_hdp_nagios_members_exist('hbasemaster')-%>
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HBASE::Percent region servers down
+        servicegroups           HBASE
+        check_command           check_aggregate!"REGIONSERVER::Process down"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+<%end-%>
+<%end-%>
+
+# GANGLIA SERVER Checks
+<%if scope.function_hdp_nagios_members_exist('ganglia-server')-%>
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia [gmetad] Process down
+        servicegroups           GANGLIA
+        check_command           check_tcp!8651!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for slaves
+        servicegroups           GANGLIA
+        check_command           check_tcp!8660!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for namenode
+        servicegroups           GANGLIA
+        check_command           check_tcp!8661!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for resourcemanager
+        servicegroups           GANGLIA
+        check_command           check_tcp!8662!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+<%if scope.function_hdp_nagios_members_exist('hbasemaster')-%>
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster
+        servicegroups           GANGLIA
+        check_command           check_tcp!8663!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+<%end-%>
+<%end-%>
+
+<%if scope.function_hdp_nagios_members_exist('snamenode')-%>
+# Secondary namenode checks
+define service {
+        hostgroup_name          snamenode
+        use                     hadoop-service
+        service_description     NAMENODE::Secondary Namenode Process down
+        servicegroups           HDFS
+        check_command           check_tcp!50090!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+<%end-%>
+<%if scope.function_hdp_nagios_members_exist('namenode')-%>
+# HDFS Checks
+define service {
+        hostgroup_name          namenode
+        use                     hadoop-service
+        service_description     NAMENODE::Namenode Web UI down
+        servicegroups           HDFS
+        check_command           check_webui!namenode
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          namenode
+        use                     hadoop-service
+        service_description     NAMENODE::Namenode Edit logs directory status
+        servicegroups           HDFS
+        check_command           check_name_dir_status!50070
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {        
+        hostgroup_name          namenode        
+        use                     hadoop-service
+        service_description     NAMENODE::Namenode Host CPU utilization
+        servicegroups           HDFS
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          namenode
+        use                     hadoop-service
+        service_description     NAMENODE::Namenode Process down
+        servicegroups           HDFS
+        check_command           check_tcp!8020!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          namenode
+        use                     hadoop-service
+        service_description     HDFS::Corrupt/Missing blocks
+        servicegroups           HDFS
+        check_command           check_hdfs_blocks!50070!0%!0%
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          namenode
+        use                     hadoop-service
+        service_description     HDFS::HDFS Capacity utilization
+        servicegroups           HDFS
+        check_command           check_hdfs_capacity!50070!80%!90%
+        normal_check_interval   10
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          namenode
+        use                     hadoop-service
+        service_description     HDFS::Namenode RPC Latency
+        servicegroups           HDFS
+        check_command           check_rpcq_latency!NameNode!50070!3000!5000
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+<%end-%>
+
+# YARN Checks
+<%if scope.function_hdp_nagios_members_exist('resourcemanager')-%>
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager Web UI down
+        servicegroups           YARN
+        check_command           check_webui!resourcemanager
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::JobHistoryServer Web UI down
+        servicegroups           YARN
+        check_command           check_webui!jobhistoryserver2
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
+        servicegroups           YARN
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager Process down
+        servicegroups           YARN
+        check_command           check_tcp!8088!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     YARN::ResourceManager RPC Latency
+        servicegroups           YARN
+        check_command           check_rpcq_latency!ResourceManager!8088!3000!5000
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+<%end-%>
+
+<%if scope.function_hdp_nagios_members_exist('slaves')-%>
+# HDFS::DATANODE Checks
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::Process down
+        servicegroups           HDFS
+        check_command           check_tcp!50010!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::Storage full
+        servicegroups           HDFS
+        check_command           check_datanode_storage!50075!90%!90%
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      2
+}
+
+# YARN::NODEMANAGER Checks 
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     NODEMANAGER::Process down
+        servicegroups           YARN
+        check_command           check_tcp!50060!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+<%end-%>
+
+<%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
+# ZOOKEEPER Checks
+define service {
+        hostgroup_name          zookeeper-servers
+        use                     hadoop-service
+        service_description     ZKSERVERS::ZKSERVERS Process down
+        servicegroups           ZOOKEEPER
+        check_command           check_tcp!2181!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+<%end-%>
+
+<%if scope.function_hdp_nagios_members_exist('hbasemaster')-%>
+# HBASE::REGIONSERVER Checks
+define service {
+        hostgroup_name          region-servers
+        use                     hadoop-service
+        service_description     REGIONSERVER::Process down
+        servicegroups           HBASE
+        check_command           check_tcp!60020!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+# HBASE::MASTER Checks
+define service {
+        hostgroup_name          hbasemaster
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Web UI down
+        servicegroups           HBASE
+        check_command           check_webui!hbase
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          hbasemaster
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBaseMaster CPU utilization
+        servicegroups           HBASE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          hbasemaster
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBaseMaster Process down
+        servicegroups           HBASE
+        check_command           check_tcp!60000!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+<%end-%>
+
+<%if scope.function_hdp_nagios_members_exist('hiveserver')-%>
+# HIVE Metastore check
+define service {
+        hostgroup_name          hiveserver
+        use                     hadoop-service
+        service_description     HIVE-METASTORE::HIVE-METASTORE status check
+        servicegroups           HIVE-METASTORE
+        check_command           check_hive_metastore_status!9083
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+<%end-%>
+<%if scope.function_hdp_nagios_members_exist('oozie-server')-%>
+# Oozie check
+define service {
+        hostgroup_name          oozie-server
+        use                     hadoop-service
+        service_description     OOZIE::Oozie status check
+        servicegroups           OOZIE
+        check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java32_home") %>
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+<%end-%>
+<%if scope.function_hdp_nagios_members_exist('templeton-server')-%>
+# Templeton check
+define service {
+        hostgroup_name          templeton-server
+        use                     hadoop-service
+        service_description     TEMPLETON::Templeton status check
+        servicegroups           TEMPLETON
+        check_command           check_templeton_status!50111!v1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+<%end-%>
diff --git a/hmc/puppet/modules/hdp2-nagios/templates/nagios.cfg.erb b/hmc/puppet/modules/hdp2-nagios/templates/nagios.cfg.erb
new file mode 100644
index 0000000..0e99e2e
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-nagios/templates/nagios.cfg.erb
@@ -0,0 +1,1349 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
+#
+# Read the documentation for more information on this configuration
+# file.  I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+# Last Modified: 12-14-2008
+#
+##############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes.  This should be the first option specified 
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/contacts.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# Definitions for monitoring the local (Linux) host
+#cfg_file=/etc/nagios/objects/localhost.cfg
+
+# Definitions for monitoring a Windows machine
+#cfg_file=/etc/nagios/objects/windows.cfg
+
+# Definitions for monitoring a router/switch
+#cfg_file=/etc/nagios/objects/switch.cfg
+
+# Definitions for monitoring a network printer
+#cfg_file=/etc/nagios/objects/printer.cfg
+
+# Definitions for hadoop servers
+cfg_file=<%=scope.function_hdp_template_var("nagios_host_cfg")%>
+cfg_file=<%=scope.function_hdp_template_var("nagios_hostgroup_cfg")%>
+cfg_file=<%=scope.function_hdp_template_var("nagios_servicegroup_cfg")%>
+cfg_file=<%=scope.function_hdp_template_var("nagios_service_cfg")%>
+cfg_file=<%=scope.function_hdp_template_var("nagios_command_cfg")%>
+
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts.  The CGIs read object definitions from 
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file.  You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start 
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions.  The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored.  Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+#  restarts.
+
+status_file=/var/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and 
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.  
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.  
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).  By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side.  If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later.  If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute.  If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly 
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody').  Permissions should be set at the 
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/nagios/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming 
+# external commands before they are processed.  As external commands 
+# are processed by the daemon, they are removed from the buffer.  
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/nagios/nagios.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc.  This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values:  0      = Broker nothing
+#         -1      = Broker everything
+#         <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup.  Use multiple directives if you want
+# to load more than one module.  Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+#    1. Shutdown Nagios, replace the module file, restart Nagios
+#    2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+#   broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+#	n	= None - don't rotate the log
+#	h	= Hourly rotation (top of the hour)
+#	d	= Daily rotation (midnight every day)
+#	w	= Weekly rotation (midnight on Saturday evening)
+#	m	= Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be 
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1.  If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0.  If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0.  If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0.  If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1.  If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option.  In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0.  If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0.  If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!  This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed.  Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts.  Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks.  Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+#       s       = Use "smart" interleave factor calculation
+#       x       = Use an interleave factor of x, where x is a
+#                 number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed.  Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of 
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized.  A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that  a single
+# check result reaper event will be allowed to run before 
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!  
+
+check_result_path=/var/nagios/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid.  Files older than this 
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently that the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks.  Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state 
+# information when checking host and service dependencies. Normally 
+# Nagios will only use the latest hard host or service state when 
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+#  0 = Don't use soft state dependencies (default) 
+#  1 = Use soft state dependencies 
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time.  This can help balance the load on
+# the monitoring server.  
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks.  This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled.  Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off.  Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands.  All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down.  Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor.  This is useful for 
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts.  Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down.  The state 
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular intervals, but it will still save retention
+# data before shutting down or restarting.  If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set 
+# program status variables based on the values saved in the
+# retention file.  If you want to use retained program status
+# information, set this value to 1.  If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file.
+# If you want to use retained scheduling info, set this
+# value to 1.  If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.  
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options.  For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options.  For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files.  Setting this to 60 means
+# that each interval is one minute long (60 seconds).  Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available.  It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios.  Nagios is critical to you - make sure you keep it in
+# good shape.  Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance 
+# with our privacy policy - see http://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates.  By default, Nagios will send information on the 
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not.  Nagios Enterprises uses
+# this data to determine the number of users running specific versions of
+# Nagios.  Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default).  Otherwise set this value to 1 to
+# enable the aggressive check option.  Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started.  Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks.  If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below).  Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed.  These commands are executed only if the
+# enable_performance_data option (above) is set to 1.  The command
+# argument is the short name of a command definition that you 
+# define in your host configuration file.  Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# enable_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files.  The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text.  A newline is automatically added after each write
+# to the performance data file.  Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below.  A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files.  The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_services option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_hosts option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios.  This option is useful
+# if you have distributed or failover monitoring setup.  In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts.  If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance.  Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT.  By default, a passive host check
+# result will put a host into a HARD state type.  This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically 
+# check for orphaned host and service checks.  Since service checks are
+# not rescheduled until the results of their previous execution 
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled.  A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks.  Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results.  If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results.  If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".  
+# Flapping occurs when a host or service changes between
+# states too frequently.  When Nagios detects that a 
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping.  Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+#         0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does.  This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+#	us		(MM-DD-YYYY HH:MM:SS)
+#	euro    	(DD-MM-YYYY HH:MM:SS)
+#	iso8601		(YYYY-MM-DD HH:MM:SS)
+#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in.  If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path 
+# to include your timezone.  Example:
+#
+#   <Directory "/usr/local/nagios/sbin/">
+#      SetEnv TZ "Australia/Brisbane"
+#      ...
+#   </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located.  If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file=/usr/bin/p1.pl
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime.  This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more 
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc.  This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+#	$HOSTOUTPUT$
+#	$HOSTPERFDATA$
+#	$HOSTACKAUTHOR$
+#	$HOSTACKCOMMENT$
+#	$SERVICEOUTPUT$
+#	$SERVICEPERFDATA$
+#	$SERVICEACKAUTHOR$
+#	$SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files.  Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression 
+# matching takes place in the object config files.  This option
+# only has an effect if regular expression matching is enabled
+# (see above).  If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?).  If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon.  Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes.  Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+#         0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+#         0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.  Enabling this option can cause performance issues in 
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+#         0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks).  If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+#        0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks).  Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems.  Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this.  If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+#        0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file.  OR values together to log multiple
+# types of information.
+# Values: 
+#          -1 = Everything
+#          0 = Nothing
+#	   1 = Functions
+#          2 = Configuration
+#          4 = Process information
+#	   8 = Scheduled events
+#          16 = Host/service checks
+#          32 = Notifications
+#          64 = Event broker
+#          128 = External commands
+#          256 = Commands
+#          512 = Scheduled downtime
+#          1024 = Comments
+#          2048 = Macros
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+#         1 = More detailed
+#         2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file.  If
+# the file grows larger than this size, it will be renamed with a .old
+# extension.  If a file already exists with a .old extension it will
+# automatically be deleted.  This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+
diff --git a/hmc/puppet/modules/hdp2-oozie/files/oozieSmoke.sh b/hmc/puppet/modules/hdp2-oozie/files/oozieSmoke.sh
new file mode 100644
index 0000000..bfd8412
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/files/oozieSmoke.sh
@@ -0,0 +1,82 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
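+# Illustrative usage (mirrors the calls further down in this script): pull one
+# property value out of a Hadoop-style XML config, e.g.
+#   getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name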
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su - ${smoke_test_user} -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the couner and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
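+# Usage (inferred from the positional arguments read below):
+#   oozieSmoke.sh <oozie_conf_dir> <hadoop_conf_dir> <smoke_test_user>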
+export oozie_conf_dir=$1
+export hadoop_conf_dir=$2
+export smoke_test_user=$3
+export OOZIE_EXIT_CODE=0
+export RESOURCEMANAGER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url`
+export OOZIE_EXAMPLES_DIR=/usr/share/doc/oozie-*/
+cd $OOZIE_EXAMPLES_DIR
+
+tar -zxf oozie-examples.tar.gz
+sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8021|jobTracker=$RESOURCEMANAGER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:9001|jobTracker=$RESOURCEMANAGER|g" examples/apps/map-reduce/job.properties
+sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
+su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id"
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/client.pp b/hmc/puppet/modules/hdp2-oozie/manifests/client.pp
new file mode 100644
index 0000000..5191d47
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/client.pp
@@ -0,0 +1,40 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie::client(
+  $service_state = $hdp2::params::cluster_client_state,
+  $oozie_server = undef
+) inherits hdp2::params
+{ 
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+     if ($hdp2::params::service_exists['hdp2-oozie::server'] != true) {
+       #installs package, creates user, sets configuration
+       class { 'hdp2-oozie' :
+         service_state => $service_state
+       }
+      if ($oozie_server != undef) {
+        Hdp2-Oozie::Configfile<||>{oozie_server => $oozie_server}
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
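+
+# Illustrative declaration (a sketch only; the host name is a placeholder, not part
+# of this patch):
+#   class { 'hdp2-oozie::client':
+#     service_state => 'installed_and_configured',
+#     oozie_server  => 'oozie-server-host'
+#   }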
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/download-ext-zip.pp b/hmc/puppet/modules/hdp2-oozie/manifests/download-ext-zip.pp
new file mode 100644
index 0000000..88c8046
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/download-ext-zip.pp
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie::download-ext-zip()
+{
+  include hdp2-oozie::params
+
+  hdp2::package { 'extjs' : }
+
+}
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/init.pp b/hmc/puppet/modules/hdp2-oozie/manifests/init.pp
new file mode 100644
index 0000000..73c7d17
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/init.pp
@@ -0,0 +1,87 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie(
+  $service_state = undef,
+  $server = false,
+  $setup = false
+)
+{
+  include hdp2-oozie::params 
+ 
+  $oozie_user = $hdp2-oozie::params::oozie_user
+  $oozie_config_dir = $hdp2-oozie::params::conf_dir
+  
+  if ($service_state == 'uninstalled') {
+    hdp2::package { 'oozie-client' : 
+      ensure => 'uninstalled'
+    }
+    if ($server == true ) {
+      hdp2::package { 'oozie-server' :
+        ensure => 'uninstalled'
+      }
+    }
+    hdp2::directory { $oozie_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    anchor { 'hdp2-oozie::begin': } -> Hdp2::Package['oozie-client'] -> Hdp2::Directory[$oozie_config_dir] ->  anchor { 'hdp2-oozie::end': }
+
+    if ($server == true ) {
+       Hdp2::Package['oozie-server'] -> Hdp2::Package['oozie-client'] ->  Anchor['hdp2-oozie::end']
+     }
+  } else {
+    hdp2::package { 'oozie-client' : }
+    if ($server == true ) {
+      hdp2::package { 'oozie-server':}
+      class { 'hdp2-oozie::download-ext-zip': }
+    }
+
+    hdp2::user{ $oozie_user:}
+
+    hdp2::directory { $oozie_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2-oozie::configfile { ['oozie-site.xml','oozie-env.sh','oozie-log4j.properties']: }
+
+    anchor { 'hdp2-oozie::begin': } -> Hdp2::Package['oozie-client'] -> Hdp2::User[$oozie_user] -> Hdp2::Directory[$oozie_config_dir] -> Hdp2-oozie::Configfile<||> -> anchor { 'hdp2-oozie::end': }
+
+    if ($server == true ) {
+      Hdp2::Package['oozie-server'] -> Hdp2::Package['oozie-client'] -> Hdp2::User[$oozie_user] -> Class['hdp2-oozie::download-ext-zip'] -> Anchor['hdp2-oozie::end']
+    }
+  }
+}
+
+### config files
+define hdp2-oozie::configfile(
+  $mode = undef,
+  $oozie_server = undef
+) 
+{
+  hdp2::configfile { "${hdp2-oozie::params::conf_dir}/${name}":
+    component       => 'oozie',
+    owner           => $hdp2-oozie::params::oozie_user,
+    mode            => $mode,
+    oozie_server    => $oozie_server
+  }
+}
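+
+# Usage (illustrative sketch only): hdp2-oozie::server and hdp2-oozie::client
+# wrap this class; a direct server-side declaration would look roughly like
+#   class { 'hdp2-oozie':
+#     service_state => 'installed_and_configured',
+#     server        => true
+#   }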
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/oozie/service_check.pp b/hmc/puppet/modules/hdp2-oozie/manifests/oozie/service_check.pp
new file mode 100644
index 0000000..67a7890
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/oozie/service_check.pp
@@ -0,0 +1,53 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie::oozie::service_check()
+{
+  include hdp2-oozie::params
+
+  $smoke_shell_files = ['oozieSmoke.sh']
+  anchor { 'hdp2-oozie::oozie::service_check::begin':}
+
+  hdp2-oozie::smoke_shell_file { $smoke_shell_files: }
+
+  anchor{ 'hdp2-oozie::oozie::service_check::end':}
+}
+
+define hdp2-oozie::smoke_shell_file()
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+  $conf_dir = $hdp2::params::oozie_conf_dir
+  $hadoopconf_dir = $hdp2::params::hadoop_conf_dir 
+
+  file { '/tmp/oozieSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-oozie/oozieSmoke.sh",
+    mode => '0755'
+  }
+
+  exec { '/tmp/oozieSmoke.sh':
+    command   => "sh /tmp/oozieSmoke.sh ${conf_dir} ${hadoopconf_dir} ${smoke_test_user}",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/oozieSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    logoutput => "true"
+  }
+}
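+
+# Note: the smoke test stages oozieSmoke.sh under /tmp and runs it as
+#   sh /tmp/oozieSmoke.sh <oozie_conf_dir> <hadoop_conf_dir> <smoke_test_user>
+# retrying up to 3 times with a 5 second pause between attempts.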
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/params.pp b/hmc/puppet/modules/hdp2-oozie/manifests/params.pp
new file mode 100644
index 0000000..d19a7ba
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/params.pp
@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie::params() inherits hdp2::params
+{
+  $oozie_user = $hdp2::params::oozie_user 
+
+  ###ext url
+  $download_url = $hdp2::params::gpl_artifacts_download_url
+  $ext_zip_url = "${download_url}/ext-2.2.zip"
+  $ext_zip_name = hdp_default("ext_zip_name","ext-2.2.zip")
+  $ext_zip_dir = hdp_default("ext_zip_dir","/usr/share/HDP-oozie/")
+
+  ### oozie-env
+  $conf_dir = $hdp2::params::oozie_conf_dir
+  $hadoop_prefix = hdp_default("hadoop_prefix","/usr")
+
+  ### oozie-env
+  $oozie_log_dir = hdp_default("hadoop/oozie-env/oozie_log_dir","/var/log/oozie")
+
+  $oozie_pid_dir = hdp_default("oozie_pid_dir","/var/run/oozie/")
+  $oozie_pid_file = hdp_default("hadoop/oozie-env/oozie_pid_file","$oozie_pid_dir/oozie.pid")
+
+  $oozie_data_dir = hdp_default("hadoop/oozie-env/oozie_data_dir","/var/data/oozie")
+
+  $oozie_tmp_dir = hdp_default("hadoop/oozie-env/oozie_tmp_dir","/var/tmp/oozie")
+
+  $oozie_lib_dir = hdp_default("hadoop/oozie-env/oozie_lib_dir","/var/lib/oozie/")
+  ### oozie-site
+  $oozie_sasl_enabled = hdp_default("hadoop/oozie-site/oozie_sasl_enabled","false")
+  $oozie_security_type = hdp_default("hadoop/oozie-site/oozie_security_type","simple")
+  $realm = hdp_default("hadoop/oozie-site/realm","EXAMPLE.COM")
+  $keytab_path = hdp_default("hadoop/oozie-site/keytab_path","/etc/security/keytabs/")
+}
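+
+# Note (assumption about the helper): hdp_default("<config key>","<fallback>")
+# is expected to return a site-level override for the key when one is
+# configured and the fallback shown here otherwise, e.g. setting
+# "hadoop/oozie-env/oozie_log_dir" replaces /var/log/oozie.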
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/server.pp b/hmc/puppet/modules/hdp2-oozie/manifests/server.pp
new file mode 100644
index 0000000..64d85ba
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/server.pp
@@ -0,0 +1,65 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie::server(
+  $service_state = $hdp2::params::cluster_service_state,
+  $setup = false,
+  $opts = {}
+) inherits  hdp2-oozie::params
+{   
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $hdp2::params::service_exists['hdp2-oozie::server'] = true
+
+    #installs package, creates user, sets configuration
+    class{ 'hdp2-oozie' : 
+      service_state => $service_state,
+      server        => true
+    } 
+  
+    Hdp2-Oozie::Configfile<||>{oozie_server => $hdp2::params::oozie_server}
+
+    class { 'hdp2-oozie::hdfs-directories' : 
+      service_state => $service_state
+    }
+
+    class { 'hdp2-oozie::service' :
+      ensure => $service_state,
+      setup  => $setup
+    }
+  
+    #top level does not need anchors
+    Class['hdp2-oozie'] -> Class['hdp2-oozie::hdfs-directories'] -> Class['hdp2-oozie::service']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+class hdp2-oozie::hdfs-directories($service_state)
+{
+  $oozie_user = $hdp2-oozie::params::oozie_user
+  hdp2-hadoop::hdfs::directory{ '/user/oozie':
+    service_state => $service_state,
+    owner => $oozie_user,
+    mode  => '775',
+    recursive_chmod => true
+  }  
+}
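+
+# Usage (illustrative sketch only): the server role is normally driven by the
+# cluster service state, e.g.
+#   class { 'hdp2-oozie::server':
+#     service_state => 'running'
+#   }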
+
diff --git a/hmc/puppet/modules/hdp2-oozie/manifests/service.pp b/hmc/puppet/modules/hdp2-oozie/manifests/service.pp
new file mode 100644
index 0000000..3665f64
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/manifests/service.pp
@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-oozie::service(
+  $ensure,
+  $setup,
+  $initial_wait = undef
+)
+{
+  include hdp2-oozie::params
+
+  $user = $hdp2-oozie::params::oozie_user
+  $hadoop_home = $hdp2-oozie::params::hadoop_prefix
+  $oozie_tmp = $hdp2-oozie::params::oozie_tmp_dir
+  $cmd = "env HADOOP_HOME=${hadoop_home} /usr/sbin/oozie_server.sh"
+  $pid_file = "${hdp2-oozie::params::oozie_pid_dir}/oozie.pid" 
+  $jar_location = $hdp2::params::hadoop_client_jar_location
+  $ext_js_path = "${hdp2-oozie::params::ext_zip_dir}/${hdp2-oozie::params::ext_zip_name}"
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  'cd ${oozie_tmp} ; /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.23.1 $jar_location -extjs $ext_js_path; /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run Validate DB Connection; /usr/lib/oozie/bin/oozie-start.sh'"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd  = "su - ${user} -c  'cd ${oozie_tmp} ; /usr/lib/oozie/bin/oozie-stop.sh'"
+    $no_op_test = undef
+  } else {
+    $daemon_cmd = undef
+  }
+
+  hdp2-oozie::service::directory { $hdp2-oozie::params::oozie_pid_dir : }
+  hdp2-oozie::service::directory { $hdp2-oozie::params::oozie_log_dir : }
+  hdp2-oozie::service::directory { $hdp2-oozie::params::oozie_tmp_dir : }
+  hdp2-oozie::service::directory { $hdp2-oozie::params::oozie_data_dir : }
+  hdp2-oozie::service::directory { $hdp2-oozie::params::oozie_lib_dir : }
+  if ($ensure == 'running') {
+    hdp2-oozie::service::createsymlinks { 'yarn-mapred-site.xml' : }
+  }
+
+  anchor{'hdp2-oozie::service::begin':} -> Hdp2-oozie::Service::Directory<||> -> anchor{'hdp2-oozie::service::end':}
+  
+  if ($daemon_cmd != undef) {
+    hdp2::exec { $daemon_cmd:
+      command => $daemon_cmd,
+      unless  => $no_op_test,
+      initial_wait => $initial_wait
+    }
+
+    if ( $ensure == 'running' ) {
+      Hdp2-oozie::Service::Directory<||> -> Hdp2-oozie::Service::Createsymlinks<||> -> Hdp2::Exec[$daemon_cmd] -> Anchor['hdp2-oozie::service::end']
+    } else {
+      Hdp2-oozie::Service::Directory<||> -> Hdp2::Exec[$daemon_cmd] -> Anchor['hdp2-oozie::service::end']
+    }
+  }
+}
+
+define hdp2-oozie::service::directory()
+{
+  hdp2::directory_recursive_create { $name: 
+    owner => $hdp2-oozie::params::oozie_user,
+    mode => '0755',
+    service_state => $ensure,
+    force => true
+  }
+}
+
+define hdp2-oozie::service::createsymlinks()
+{
+  hdp2::exec { '/usr/lib/oozie/conf':
+    command => "ln -sf /etc/oozie/conf /usr/lib/oozie/conf",
+    unless => "test -e /usr/lib/oozie/conf"
+  }
+
+  hdp2::exec { '/etc/oozie/conf/hadoop-conf':
+    command => "ln -sf /etc/hadoop/conf /etc/oozie/conf/hadoop-conf",
+    unless => "test -e /etc/oozie/conf/hadoop-conf"
+  }
+}
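+
+# Note: repeated puppet runs stay idempotent here because the start command is
+# guarded by the pid-file "unless" test and both symlink execs are guarded by
+# "test -e", so nothing is re-run while Oozie is up and the links exist.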
diff --git a/hmc/puppet/modules/hdp2-oozie/templates/oozie-env.sh.erb b/hmc/puppet/modules/hdp2-oozie/templates/oozie-env.sh.erb
new file mode 100644
index 0000000..41ecfb4
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/templates/oozie-env.sh.erb
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#      http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#Set JAVA HOME
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG=<%=scope.function_hdp_template_var("oozie_log_dir")%>/
+
+# Oozie pid directory
+#
+export CATALINA_PID=<%=scope.function_hdp_template_var("oozie_pid_file")%>
+export CATALINA_TMPDIR=<%=scope.function_hdp_template_var("oozie_tmp_dir")%>
+
+#Location of the data for oozie
+export OOZIE_DATA=<%=scope.function_hdp_template_var("oozie_data_dir")%>/
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+# export OOZIE_HTTP_PORT=11000
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
diff --git a/hmc/puppet/modules/hdp2-oozie/templates/oozie-log4j.properties.erb b/hmc/puppet/modules/hdp2-oozie/templates/oozie-log4j.properties.erb
new file mode 100644
index 0000000..a14ae89
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/templates/oozie-log4j.properties.erb
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=DEBUG, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=DEBUG, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=DEBUG, oozie
diff --git a/hmc/puppet/modules/hdp2-oozie/templates/oozie-site.xml.erb b/hmc/puppet/modules/hdp2-oozie/templates/oozie-site.xml.erb
new file mode 100644
index 0000000..12d9e70
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-oozie/templates/oozie-site.xml.erb
@@ -0,0 +1,249 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->     
+
+<configuration>
+
+<!--
+    Refer to the oozie-default.xml file for the complete list of
+    Oozie configuration properties and their default values.
+-->
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://<%=scope.function_hdp_host("oozie_server")%>:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+   </property>
+
+  <property>
+    <name>oozie.system.id</name>
+    <value>oozie-${user.name}</value>
+    <description>
+    The Oozie system ID.
+    </description>
+   </property>
+
+   <property>
+     <name>oozie.systemmode</name>
+     <value>NORMAL</value>
+     <description>
+     System mode for  Oozie at startup.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.AuthorizationService.security.enabled</name>
+     <value>false</value>
+     <description>
+     Specifies whether security (user name/admin role) is enabled or not.
+     If disabled any user can manage Oozie system and manage any job.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.PurgeService.older.than</name>
+     <value>30</value>
+     <description>
+     Jobs older than this value, in days, will be purged by the PurgeService.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.PurgeService.purge.interval</name>
+     <value>3600</value>
+     <description>
+     Interval at which the purge service will run, in seconds.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.queue.size</name>
+     <value>1000</value>
+     <description>Max callable queue size</description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.threads</name>
+     <value>10</value>
+     <description>Number of threads used for executing callables</description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.callable.concurrency</name>
+     <value>3</value>
+     <description>
+     Maximum concurrency for a given callable type.
+     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
+     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+     All commands that use action executors (action-start, action-end, action-kill and action-check) use
+     the action type as the callable type.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.coord.normal.default.timeout</name>
+     <value>120</value>
+     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout</description>
+   </property>
+
+   <property>
+     <name>oozie.db.schema.name</name>
+     <value>oozie</value>
+     <description>
+      Oozie DataBase Name
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.StoreService.create.db.schema</name>
+     <value>true</value>
+     <description>
+     Creates Oozie DB.
+     If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
+     If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.StoreService.jdbc.driver</name>
+     <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+     <description>
+     JDBC driver class.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.StoreService.jdbc.url</name>
+     <value>jdbc:derby:<%=scope.function_hdp_template_var("oozie_data_dir")%>/${oozie.db.schema.name}-db;create=true</value>
+     <description>
+     JDBC URL.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.StoreService.jdbc.username</name>
+     <value>sa</value>
+     <description>
+     DB user name.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.StoreService.jdbc.password</name>
+     <value> </value>
+     <description>
+     DB user password.
+     IMPORTANT: if password is empty leave a 1 space string, the service trims the value,
+      if empty Configuration assumes it is NULL.
+     IMPORTANT: if the StoreServicePasswordService is active, it will reset this value with the value given in
+     the console.
+    </description>
+   </property>
+
+   <property>
+     <name>oozie.service.StoreService.pool.max.active.conn</name>
+     <value>10</value>
+     <description>
+      Max number of connections.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
+     <value><%=scope.function_hdp_template_var("oozie_sasl_enabled")%></value>
+     <description>
+     Indicates if Oozie is configured to use Kerberos.
+     </description>
+   </property>
+
+   <property>
+     <name>local.realm</name>
+     <value><%=scope.function_hdp_template_var("realm")%></value>
+     <description>
+     Kerberos Realm used by Oozie and Hadoop. Using 'local.realm' to be aligned with Hadoop configuration
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.HadoopAccessorService.keytab.file</name>
+     <value><%=scope.function_hdp_template_var("keytab_path")%>/oozie.service.keytab</value>
+     <description>
+     Location of the Oozie user keytab file.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.HadoopAccessorService.kerberos.principal</name>
+     <value>oozie/<%=scope.function_hdp_host("oozie_server")%>@<%=scope.function_hdp_template_var("realm")%></value>
+     <description>
+      Kerberos principal for Oozie service.
+     </description>
+   </property>
+
+    <property>
+      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+      <value> </value>
+      <description>
+      Whitelisted job tracker for Oozie service.
+      </description>
+    </property>
+   
+    <property>
+      <name>oozie.authentication.type</name>
+      <value><%=scope.function_hdp_template_var("oozie_security_type")%></value>
+      <description>
+      </description>
+    </property>
+    
+    <property>
+      <name>oozie.authentication.kerberos.principal</name>
+      <value>HTTP/<%=scope.function_hdp_host("oozie_server")%>@<%=scope.function_hdp_template_var("realm")%></value>
+      <description>
+       Kerberos principal to be used for the Oozie HTTP (SPNEGO) authentication endpoint.
+      </description>
+    </property>
+
+    <property>
+      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+      <value> </value>
+      <description>
+      </description>
+    </property>
+
+    <property>
+      <name>oozie.service.WorkflowAppService.system.libpath</name>
+      <value>/user/${user.name}/share/lib</value>
+      <description>
+      System library path to use for workflow applications.
+      This path is added to workflow application if their job properties sets
+      the property 'oozie.use.system.libpath' to true.
+      </description>
+    </property>
+
+    <property>
+      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+      <value>false</value>
+      <description>
+      If set to true, submissions of MapReduce and Pig jobs will include
+      automatically the system library path, thus not requiring users to
+      specify where the Pig JAR files are. Instead, the ones from the system
+      library path are used.
+      </description>
+    </property>
+</configuration>
+
diff --git a/hmc/puppet/modules/hdp2-pig/files/pigSmoke.sh b/hmc/puppet/modules/hdp2-pig/files/pigSmoke.sh
new file mode 100644
index 0000000..b6a286f
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/files/pigSmoke.sh
@@ -0,0 +1,24 @@
+/*
+ *
+ * licensed to the apache software foundation (asf) under one
+ * or more contributor license agreements.  see the notice file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  the asf licenses this file
+ * to you under the apache license, version 2.0 (the
+ * "license"); you may not use this file except in compliance
+ * with the license.  you may obtain a copy of the license at
+ *
+ *   http://www.apache.org/licenses/license-2.0
+ *
+ * unless required by applicable law or agreed to in writing,
+ * software distributed under the license is distributed on an
+ * "as is" basis, without warranties or conditions of any
+ * kind, either express or implied.  see the license for the
+ * specific language governing permissions and limitations
+ * under the license.
+ *
+ */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';
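+
+-- Context (from the pig service check): /etc/passwd is staged into HDFS as
+-- 'passwd' before this script runs; the script projects the first field of
+-- each ':'-separated row and stores it in 'pigsmoke.out', which the check
+-- then verifies with 'fs -test -e pigsmoke.out'.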
diff --git a/hmc/puppet/modules/hdp2-pig/manifests/init.pp b/hmc/puppet/modules/hdp2-pig/manifests/init.pp
new file mode 100644
index 0000000..d9110a2
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/manifests/init.pp
@@ -0,0 +1,72 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-pig(
+  $service_state = $hdp2::params::cluster_client_state
+) inherits hdp2-pig::params
+{  
+  $pig_config_dir = $hdp2-pig::params::pig_conf_dir
+ 
+  if ($hdp2::params::use_32_bits_on_slaves == false) {
+    $size = 64
+  } else {
+    $size = 32
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state == 'uninstalled') {
+    hdp2::package { 'pig' :
+      ensure => 'uninstalled',
+      size   => $size 
+    }
+    hdp2::directory_recursive_create { $pig_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+    anchor { 'hdp2-pig::begin': } -> Hdp2::Package['pig'] -> Hdp2::Directory_recursive_create[$pig_config_dir] -> anchor { 'hdp2-pig::end': }
+
+  } elsif ($service_state == 'installed_and_configured') {
+    hdp2::package { 'pig' : 
+      size => $size 
+    }
+
+    hdp2::directory { $pig_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp2-pig::configfile { ['pig-env.sh','pig.properties','log4j.properties']:}
+  
+    anchor { 'hdp2-pig::begin': } -> Hdp2::Package['pig'] -> Hdp2::Directory[$pig_config_dir] -> Hdp2-pig::Configfile<||> -> anchor { 'hdp2-pig::end': }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+### config files
+define hdp2-pig::configfile()
+{
+  hdp2::configfile { "${hdp2::params::pig_conf_dir}/${name}":
+    component => 'pig'
+  }
+}
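+
+# Usage (illustrative sketch only):
+#   class { 'hdp2-pig':
+#     service_state => 'installed_and_configured'
+#   }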
+
+
+
diff --git a/hmc/puppet/modules/hdp2-pig/manifests/params.pp b/hmc/puppet/modules/hdp2-pig/manifests/params.pp
new file mode 100644
index 0000000..246561d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/manifests/params.pp
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-pig::params() inherits hdp2::params
+{
+  $pig_conf_dir = $hdp2::params::pig_conf_dir
+}
diff --git a/hmc/puppet/modules/hdp2-pig/manifests/pig/service_check.pp b/hmc/puppet/modules/hdp2-pig/manifests/pig/service_check.pp
new file mode 100644
index 0000000..132a596
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/manifests/pig/service_check.pp
@@ -0,0 +1,70 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-pig::pig::service_check() 
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+  $input_file = 'passwd'
+  $output_file = "pigsmoke.out"
+
+  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
+  #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is fn of second command
+  $create_file_cmd = "${cleanup_cmd}; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that second command needs hadoop
+  $test_cmd = "fs -test -e ${output_file}" 
+  
+  anchor { 'hdp2-pig::pig::service_check::begin':}
+
+
+  hdp2-hadoop::exec-hadoop { 'pig::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 3,
+    try_sleep => 5,
+    require   => Anchor['hdp2-pig::pig::service_check::begin'],
+    notify    => File['/tmp/pigSmoke.sh'],
+    user      => $smoke_test_user
+  }
+
+  file { '/tmp/pigSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-pig/pigSmoke.sh",
+    mode => '0755',
+    require     => Hdp2-hadoop::Exec-hadoop['pig::service_check::create_file']
+  }
+
+  exec { '/tmp/pigSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'pig /tmp/pigSmoke.sh'",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/pigSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    notify    => Hdp2-hadoop::Exec-hadoop['pig::service_check::test'],
+    logoutput => "true"
+  }
+
+  hdp2-hadoop::exec-hadoop { 'pig::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Exec['/tmp/pigSmoke.sh'],
+    before      => Anchor['hdp2-pig::pig::service_check::end'], #TODO: remove after testing
+    user      => $smoke_test_user
+  }
+  
+  anchor{ 'hdp2-pig::pig::service_check::end':}
+}
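+
+# Note: ordering is carried by notify/require/refreshonly above: the HDFS input
+# is staged first, pigSmoke.sh then runs as the smoke user, and only a
+# successful run triggers the final 'fs -test -e pigsmoke.out' check.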
diff --git a/hmc/puppet/modules/hdp2-pig/templates/log4j.properties.erb b/hmc/puppet/modules/hdp2-pig/templates/log4j.properties.erb
new file mode 100644
index 0000000..9ef6e2c
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/templates/log4j.properties.erb
@@ -0,0 +1,30 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set root logger level to DEBUG and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
diff --git a/hmc/puppet/modules/hdp2-pig/templates/pig-env.sh.erb b/hmc/puppet/modules/hdp2-pig/templates/pig-env.sh.erb
new file mode 100644
index 0000000..1500763
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/templates/pig-env.sh.erb
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME=<%=scope.function_hdp_java_home()%>
+HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp2::params::hadoop_home")%>}
diff --git a/hmc/puppet/modules/hdp2-pig/templates/pig.properties.erb b/hmc/puppet/modules/hdp2-pig/templates/pig.properties.erb
new file mode 100644
index 0000000..1933982
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-pig/templates/pig.properties.erb
@@ -0,0 +1,54 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Pig configuration file. All values can be overwritten by command line arguments.
+
+# log4jconf log4j configuration file
+# log4jconf=./conf/log4j.properties
+
+# a file that contains pig script
+#file=
+
+# load jarfile, colon separated
+#jar=
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=local
+
+#pig.logfile=
+
+#Do not spill temp files smaller than this size (bytes)
+#pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+#pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+#pig.exec.reducers.bytes.per.reducer=1000000000
+#pig.exec.reducers.max=999
+
+#Use this option only when your Pig job will otherwise die because of
+#using more counter than hadoop configured limit
+#pig.disable.counter=true
diff --git a/hmc/puppet/modules/hdp2-zookeeper/files/zkEnv.sh b/hmc/puppet/modules/hdp2-zookeeper/files/zkEnv.sh
new file mode 100644
index 0000000..07017e1
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"
diff --git a/hmc/puppet/modules/hdp2-zookeeper/files/zkServer.sh b/hmc/puppet/modules/hdp2-zookeeper/files/zkServer.sh
new file mode 100644
index 0000000..49ceb4d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this scripted is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # for some reason these two options are necessary on jdk6 on Ubuntu
+    #   accord to the docs they are not necessary, but otw jconsole cannot
+    #   do a local attach
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+    echo "error: could not find file $ZOOPIDFILE"
+    exit 1
+    else
+    $KILL -9 $(cat "$ZOOPIDFILE")
+    rm "$ZOOPIDFILE"
+    echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac
diff --git a/hmc/puppet/modules/hdp2-zookeeper/files/zkService.sh b/hmc/puppet/modules/hdp2-zookeeper/files/zkService.sh
new file mode 100644
index 0000000..32dfce4
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"
diff --git a/hmc/puppet/modules/hdp2-zookeeper/files/zkSmoke.sh b/hmc/puppet/modules/hdp2-zookeeper/files/zkSmoke.sh
new file mode 100644
index 0000000..2097c90
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/files/zkSmoke.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smoke_script=$1
+smoke_user=$2
+conf_dir=$3
+export ZOOKEEPER_EXIT_CODE=0
+zkhosts=` grep server  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
+echo "zk_node1=$zk_node1"
+# Delete /zk_smoketest znode if exists
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:2181"  
+# Create /zk_smoketest znode on one zookeeper server
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:2181"
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:2181"
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:2181"
+  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:2181")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:2181"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed" 
+else
+   echo "Zookeeper Smoke Test: Passed" 
+fi
+exit $ZOOKEEPER_EXIT_CODE
diff --git a/hmc/puppet/modules/hdp2-zookeeper/manifests/client.pp b/hmc/puppet/modules/hdp2-zookeeper/manifests/client.pp
new file mode 100644
index 0000000..1ef1b43
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/manifests/client.pp
@@ -0,0 +1,47 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-zookeeper::client(
+  $service_state = $hdp2::params::cluster_client_state
+) inherits hdp2::params
+{
+  $package_type = $hdp2::params::packages
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
+    if ($package_type == 'hdp') {
+      $cmd = "ln -s /usr/libexec/zkEnv.sh /usr/bin/zkEnv.sh"
+      $test = "test -e /usr/bin/zkEnv.sh"
+      hdp2::exec { $cmd :
+        command => $cmd,
+        unless  => $test,
+        require => Class['hdp2-zookeeper']
+      }
+    }
+    if ($hdp2::params::service_exists['hdp2-zookeeper'] != true) {
+      class { 'hdp2-zookeeper' :
+        type          => 'client',
+        service_state => $service_state
+      }
+    }
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
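+
+# Usage (illustrative sketch only):
+#   class { 'hdp2-zookeeper::client':
+#     service_state => 'installed_and_configured'
+#   }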
diff --git a/hmc/puppet/modules/hdp2-zookeeper/manifests/init.pp b/hmc/puppet/modules/hdp2-zookeeper/manifests/init.pp
new file mode 100644
index 0000000..d311755
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/manifests/init.pp
@@ -0,0 +1,121 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-zookeeper(
+  $type = server,
+  $service_state = $hdp2::params::cluster_service_state,
+  $myid = 1,
+  $opts = {}
+) inherits hdp2-zookeeper::params 
+{
+
+ if ($service_state == 'no_op') {
+   if ($type == 'server') {
+     $hdp2::params::service_exists['hdp2-zookeeper'] = true
+  }
+ } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+   $zk_user = $hdp2-zookeeper::params::zk_user
+   $zk_config_dir = $hdp2-zookeeper::params::conf_dir
+ 
+   anchor{'hdp2-zookeeper::begin':}
+   anchor{'hdp2-zookeeper::end':}
+
+   if ($service_state == 'uninstalled') {
+     if ($type == 'server') {
+       $hdp2::params::service_exists['hdp2-zookeeper'] = true
+    }
+     hdp2::package { 'zookeeper':
+       ensure => 'uninstalled'
+     }
+     hdp2::directory_recursive_create { $zk_config_dir:
+       service_state => $service_state,
+       force => true
+     }
+
+     if ($type == 'server') {
+        class { 'hdp2-zookeeper::service':
+          ensure => $service_state,
+          myid   => $myid
+        }
+       }
+
+     if ($type == 'server') {
+       Anchor['hdp2-zookeeper::begin'] -> Hdp2::Package['zookeeper'] -> Hdp2::Directory_recursive_create[$zk_config_dir] -> Class['hdp2-zookeeper::service']  -> Anchor['hdp2-zookeeper::end']
+     } else {
+       Anchor['hdp2-zookeeper::begin'] -> Hdp2::Package['zookeeper'] -> Hdp2::Directory_recursive_create[$zk_config_dir] -> Anchor['hdp2-zookeeper::end']
+     }
+   } else {
+     hdp2::package { 'zookeeper':}
+
+     hdp2::user{ $zk_user:}
+
+     hdp2::directory_recursive_create { $zk_config_dir: 
+      service_state => $service_state,
+      force => true
+     }
+
+     hdp2-zookeeper::configfile { ['zoo.cfg','zookeeper-env.sh','configuration.xsl','log4j.properties']: }
+ 
+     if ($hdp2::params::update_zk_shell_files == true) {
+       hdp2-zookeeper::shell_file{ ['zkServer.sh','zkEnv.sh']: }
+     }
+
+     if ($type == 'server') {
+       $hdp2::params::service_exists['hdp2-zookeeper'] = true
+       class { 'hdp2-zookeeper::service': 
+         ensure => $service_state,
+         myid   => $myid
+       }
+      }
+
+      Anchor['hdp2-zookeeper::begin'] -> Hdp2::Package['zookeeper'] -> Hdp2::User[$zk_user] -> 
+        Hdp2::Directory_recursive_create[$zk_config_dir] -> Hdp2-zookeeper::Configfile<||> -> Anchor['hdp2-zookeeper::end']
+      if ($type == 'server') {
+        Hdp2::Directory_recursive_create[$zk_config_dir] -> Hdp2-zookeeper::Configfile<||> -> Class['hdp2-zookeeper::service'] -> Anchor['hdp2-zookeeper::end']
+      }
+      if ($hdp2::params::update_zk_shell_files == true) {
+        Hdp2::Package['zookeeper'] -> Hdp2-zookeeper::Shell_file<||> -> Anchor['hdp2-zookeeper::end']
+      }
+   }
+ } else {
+   hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+ }
+}
+
+### config files
+define hdp2-zookeeper::configfile(
+  $mode = undef
+) 
+{
+  hdp2::configfile { "${hdp2-zookeeper::params::conf_dir}/${name}":
+    component       => 'zookeeper',
+    owner           => $hdp2-zookeeper::params::zk_user,
+    mode            => $mode
+  }
+}
+
+### shell files
+define hdp2-zookeeper::shell_file()
+{
+  file { "${hdp2::params::zk_bin}/${name}":
+    source => "puppet:///modules/hdp2-zookeeper/${name}", 
+    mode => '0755'
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-zookeeper/manifests/params.pp b/hmc/puppet/modules/hdp2-zookeeper/manifests/params.pp
new file mode 100644
index 0000000..7db9eae
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/manifests/params.pp
@@ -0,0 +1,38 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-zookeeper::params() inherits hdp2::params 
+{
+  $conf_dir = $hdp2::params::zk_conf_dir
+
+  $zk_user = $hdp2::params::zk_user
+  
+  $zk_log_dir = hdp_default("zk_log_dir","/var/log/zookeeper")
+  $zk_data_dir = hdp_default("zk_data_dir","/var/lib/zookeeper/data")
+  $zk_pid_dir = hdp_default("zk_pid_dir","/var/run/zookeeper")
+  $zk_pid_file = "${zk_pid_dir}/zookeeper_server.pid"
+
+
+  $tickTime = hdp_default("tickTime","2000")
+  $initLimit = hdp_default("initLimit","10")
+  $syncLimit = hdp_default("syncLimit","5")
+  $clientPort = hdp_default("clientPort","2181")
+
+}
diff --git a/hmc/puppet/modules/hdp2-zookeeper/manifests/quorum/service_check.pp b/hmc/puppet/modules/hdp2-zookeeper/manifests/quorum/service_check.pp
new file mode 100644
index 0000000..4a18a98
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/manifests/quorum/service_check.pp
@@ -0,0 +1,54 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-zookeeper::quorum::service_check()
+{
+  include hdp2-zookeeper::params
+  $conf_dir = $hdp2-zookeeper::params::conf_dir
+
+  $smoke_test_user = $hdp2::params::smokeuser
+
+  $smoke_script = $hdp2::params::zk_smoke_test_script
+  $quorum_smoke_shell_files = ['zkSmoke.sh']
+
+  anchor { 'hdp2-zookeeper::quorum::service_check::begin':}
+
+  hdp2-zookeeper::quorum_smoke_shell_file { $quorum_smoke_shell_files: }
+
+  anchor{ 'hdp2-zookeeper::quorum::service_check::end':}
+}
+
+define hdp2-zookeeper::quorum_smoke_shell_file()
+{
+  file { '/tmp/zkSmoke.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-zookeeper/zkSmoke.sh",
+    mode => '0755'
+  }
+
+  exec { '/tmp/zkSmoke.sh':
+    command   => "sh /tmp/zkSmoke.sh ${smoke_script} ${smoke_test_user} ${conf_dir}",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/zkSmoke.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    logoutput => "true"
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-zookeeper/manifests/service.pp b/hmc/puppet/modules/hdp2-zookeeper/manifests/service.pp
new file mode 100644
index 0000000..03ec304
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/manifests/service.pp
@@ -0,0 +1,97 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-zookeeper::service(
+  $ensure = $hdp2::params::cluster_service_state,
+  $myid
+)
+{
+  include hdp2-zookeeper::params
+  $user = $hdp2-zookeeper::params::zk_user
+  $conf_dir = $hdp2-zookeeper::params::conf_dir
+  $zk_bin = $hdp2::params::zk_bin
+  $cmd = "/bin/env ZOOCFGDIR=${conf_dir} ZOOCFG=zoo.cfg ${zk_bin}/zkServer.sh"
+
+  $pid_file = $hdp2-zookeeper::params::zk_pid_file  
+
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  'source ${conf_dir}/zookeeper-env.sh ; ${cmd} start'"
+    $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+    #not using $no_op_test = "su - ${user} -c '${cmd} status'" because it checks more than whether the service process is running
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  'source ${conf_dir}/zookeeper-env.sh ; ${cmd} stop'"
+    #TODO: put in no_op_test for stopped
+    $no_op_test = undef
+  } else {
+    $daemon_cmd = undef
+  }
+  hdp2::directory_recursive_create { $hdp2-zookeeper::params::zk_pid_dir: 
+    owner        => $user,
+    context_tag => 'zk_service',
+    service_state => $ensure,
+    force => true
+  }
+  hdp2::directory_recursive_create { $hdp2-zookeeper::params::zk_log_dir: 
+    owner        => $user,
+    context_tag => 'zk_service',
+    service_state => $ensure,
+    force => true
+  }
+  hdp2::directory_recursive_create { $hdp2-zookeeper::params::zk_data_dir:
+    owner        => $user,
+    context_tag => 'zk_service',
+    service_state => $ensure,
+    force => true
+  }
+  
+  if ($daemon_cmd != undef) {
+    hdp2::exec { $daemon_cmd:
+      command => $daemon_cmd,
+      unless  => $no_op_test,
+      initial_wait => $initial_wait
+    }
+  }
+
+  if ($ensure == 'uninstalled') {
+    anchor{'hdp2-zookeeper::service::begin':} -> Hdp2::Directory_recursive_create<|context_tag == 'zk_service'|> ->  anchor{'hdp2-zookeeper::service::end':}
+  } else {
+    class { 'hdp2-zookeeper::set_myid': myid => $myid}
+
+    anchor{'hdp2-zookeeper::service::begin':} -> Hdp2::Directory_recursive_create<|context_tag == 'zk_service'|> -> 
+    Class['hdp2-zookeeper::set_myid'] -> anchor{'hdp2-zookeeper::service::end':}
+
+    if ($daemon_cmd != undef) {
+      Class['hdp2-zookeeper::set_myid'] -> Hdp2::Exec[$daemon_cmd] -> Anchor['hdp2-zookeeper::service::end']
+    }
+  }
+}
+
+class hdp2-zookeeper::set_myid($myid)
+{
+  $create_file = "${hdp2-zookeeper::params::zk_data_dir}/myid"
+  $cmd = "echo '${myid}' > ${create_file}"
+  hdp2::exec{ $cmd:
+    command => $cmd,
+    creates  => $create_file
+  }
+}
+
+
+
diff --git a/hmc/puppet/modules/hdp2-zookeeper/manifests/zookeeper/service_check.pp b/hmc/puppet/modules/hdp2-zookeeper/manifests/zookeeper/service_check.pp
new file mode 100644
index 0000000..8da74d4
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/manifests/zookeeper/service_check.pp
@@ -0,0 +1,53 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-zookeeper::zookeeper::service_check()
+{
+  include hdp2-zookeeper::params
+  $conf_dir = $hdp2-zookeeper::params::conf_dir
+  $smoke_script = $hdp2::params::zk_smoke_test_script
+
+  $smoke_test_user = $hdp2::params::smokeuser
+  $zookeeper_smoke_shell_files = ['zkService.sh']
+
+  anchor { 'hdp2-zookeeper::zookeeper::service_check::begin':}
+
+  hdp2-zookeeper::zookeeper_smoke_shell_file { $zookeeper_smoke_shell_files: }
+
+  anchor{ 'hdp2-zookeeper::zookeeper::service_check::end':}
+}
+
+define hdp2-zookeeper::zookeeper_smoke_shell_file()
+{
+  file { '/tmp/zkService.sh':
+    ensure => present,
+    source => "puppet:///modules/hdp2-zookeeper/zkService.sh",
+    mode => '0755'
+  }
+
+  exec { '/tmp/zkService.sh':
+    command   => "sh /tmp/zkService.sh ${smoke_script} ${smoke_test_user} ${conf_dir}",
+    tries     => 3,
+    try_sleep => 5,
+    require   => File['/tmp/zkService.sh'],
+    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    logoutput => "true"
+  }
+}
diff --git a/hmc/puppet/modules/hdp2-zookeeper/templates/configuration.xsl.erb b/hmc/puppet/modules/hdp2-zookeeper/templates/configuration.xsl.erb
new file mode 100644
index 0000000..c003ba2
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/templates/configuration.xsl.erb
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/hmc/puppet/modules/hdp2-zookeeper/templates/log4j.properties.erb b/hmc/puppet/modules/hdp2-zookeeper/templates/log4j.properties.erb
new file mode 100644
index 0000000..db69564
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/templates/log4j.properties.erb
@@ -0,0 +1,71 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/hmc/puppet/modules/hdp2-zookeeper/templates/zoo.cfg.erb b/hmc/puppet/modules/hdp2-zookeeper/templates/zoo.cfg.erb
new file mode 100644
index 0000000..25909b5
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/templates/zoo.cfg.erb
@@ -0,0 +1,37 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime=<%=scope.function_hdp_template_var("tickTime")%>
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=<%=scope.function_hdp_template_var("initLimit")%>
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=<%=scope.function_hdp_template_var("syncLimit")%>
+# the directory where the snapshot is stored.
+dataDir=<%=scope.function_hdp_template_var("zk_data_dir")%>
+# the port at which the clients will connect
+clientPort=<%=scope.function_hdp_template_var("clientPort")%>
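+# ensemble members: one server.N=<host>:2888:3888 entry per configured zookeeper host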
+<%(scope.function_hdp_host("zookeeper_hosts")||[]).each_with_index do |host,i|-%>
+server.<%=(i+1).to_s%>=<%=host%>:2888:3888
+<% end -%>
diff --git a/hmc/puppet/modules/hdp2-zookeeper/templates/zookeeper-env.sh.erb b/hmc/puppet/modules/hdp2-zookeeper/templates/zookeeper-env.sh.erb
new file mode 100644
index 0000000..50f0910
--- /dev/null
+++ b/hmc/puppet/modules/hdp2-zookeeper/templates/zookeeper-env.sh.erb
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+export ZOO_LOG_DIR=<%=scope.function_hdp_template_var("zk_log_dir")%>
+export ZOOPIDFILE=<%=scope.function_hdp_template_var("zk_pid_file")%>
+export SERVER_JVMFLAGS= 
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
\ No newline at end of file
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_args_as_array.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_args_as_array.rb
new file mode 100644
index 0000000..46becea
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_args_as_array.rb
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#normalizes how args are passed in: wraps a single value in an array, leaves arrays as-is
+module Puppet::Parser::Functions
+  newfunction(:hdp_args_as_array, :type => :rvalue) do |args|
+    args.kind_of?(Array) ? args : [args]
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_array_from_comma_list.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
new file mode 100644
index 0000000..56882f3
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_array_from_comma_list.rb
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
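+  # splits a comma-separated string into an array; empty input yields ""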
+  newfunction(:hdp_array_from_comma_list, :type => :rvalue) do |args|
+    args = [args].flatten
+    function_hdp_is_empty(args[0]) ? "" : args[0].split(",") 
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
new file mode 100644
index 0000000..e83a742
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_calc_xmn_from_xms.rb
@@ -0,0 +1,37 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
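+  # derives an Xmn (young-generation) size from a heapsize string such as "1024m":
+  # takes xmn_percent of the numeric part, rounds down to a multiple of 8, caps it at
+  # xmn_max, and re-appends the original unit suffix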
+  newfunction(:hdp_calc_xmn_from_xms, :type => :rvalue) do |args|
+    heapsize_orig_str = args[0].to_s
+    xmn_percent = args[1].to_f
+    xmn_max = args[2].to_i
+    heapsize_str = heapsize_orig_str.gsub(/\D/,"")
+    heapsize = heapsize_str.to_i
+    heapsize_unit = heapsize_orig_str.gsub(/\d/,"")
+    xmn_val = heapsize*xmn_percent
+    xmn_val = xmn_val.floor.to_i
+    xmn_val = xmn_val/8
+    xmn_val = xmn_val*8
+    xmn_val = xmn_val > xmn_max ? xmn_max : xmn_val
+    xmn_val_str = "" + xmn_val.to_s + heapsize_unit
+    xmn_val_str
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_default.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_default.rb
new file mode 100644
index 0000000..56ffc66
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_default.rb
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
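+  # hdp_default("scope/var_name", default) looks up the top-scope variable ::var_name
+  # (the part after the last "/") and returns it, falling back to the supplied default
+  # (or "") when the variable is empty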
+  newfunction(:hdp_default, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    scoped_var_name = args[0]
+    var_name = scoped_var_name.split("/").last
+    default = args[1]
+    val = lookupvar("::#{var_name}")
+    function_hdp_is_empty(val) ? (default||"") : val
+  end
+end
+
+
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_fail.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_fail.rb
new file mode 100644
index 0000000..9241b1d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_fail.rb
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
+  newfunction(:hdp_fail) do |args|
+    args = [args].flatten
+    msg = args[0]
+    function_fail(msg)
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_host.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_host.rb
new file mode 100644
index 0000000..4a9b142
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_host.rb
@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
+  newfunction(:hdp_host, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    var = args[0]
+    val = lookupvar(var)
+    function_hdp_is_empty(val) ? "" : val 
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_host_attribute.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_host_attribute.rb
new file mode 100644
index 0000000..1df1392
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_host_attribute.rb
@@ -0,0 +1,40 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
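+  # looks up attr for the given host(s) in the host-attributes hash: returns
+  # hash[source][attr] for a single host, or an array of values when source is a list;
+  # missing entries come back as ""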
+  newfunction(:hdp_host_attribute, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    hash,attr,source = args
+    ret_val = lambda do |hash,attr,s|
+      ret = ""
+      ndx = hash[s]
+      unless function_hdp_is_empty(ndx)
+        val = ndx[attr]
+        ret = function_hdp_is_empty(val) ? "" : val
+      end
+      ret
+    end
+    if source.kind_of?(Array)
+      source.map{|s|ret_val.call(hash,attr,s)}
+    else
+      ret_val.call(hash,attr,source)
+    end
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_is_empty.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_is_empty.rb
new file mode 100644
index 0000000..f57f8eb
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_is_empty.rb
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
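+  # treats nil, "undefined", "undef", and the empty string as empty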
+  newfunction(:hdp_is_empty, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    el = args[0]
+    el.nil? or (el.respond_to?(:to_s) and ["undefined","undef",""].include?(el.to_s))
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_java_home.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_java_home.rb
new file mode 100644
index 0000000..abc818d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_java_home.rb
@@ -0,0 +1,29 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
+  newfunction(:hdp_java_home, :type => :rvalue) do 
+    size = lookupvar("size")
+    if size.nil? or size == :undefined
+      size = "64"
+    end
+    lookupvar("::hdp2::params::java#{size.to_s}_home")
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_no_hosts.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_no_hosts.rb
new file mode 100644
index 0000000..6c1a988
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_no_hosts.rb
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
+  newfunction(:hdp_no_hosts, :type => :rvalue) do |args|
+    args = function_hdp_args_as_array(args)
+    var = args[0]
+    function_hdp_is_empty(function_hdp_host(var))
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_option_value.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_option_value.rb
new file mode 100644
index 0000000..1348879
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_option_value.rb
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
+  newfunction(:hdp_option_value, :type => :rvalue) do |args|
+    args = [args].flatten
+    opts = args[0]
+    key = args[1]
+    if opts.kind_of?(Hash) and not function_hdp_is_empty(key)
+      opts[key]||:undef
+    else
+      :undef
+    end
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_template_var.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_template_var.rb
new file mode 100644
index 0000000..1b552d9
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_template_var.rb
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
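+  # resolves a template variable: unless the name is already fully qualified, it is
+  # prefixed with the calling component's params class (::hdp2::params for "base",
+  # ::hdp2-<component>::params otherwise); unset values are returned as ""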
+  newfunction(:hdp_template_var, :type => :rvalue) do |args|
+    args = [args].flatten
+    qualified_var = args[0]
+    unless qualified_var =~ /^::/
+      #module_name = lookupvar("module_name")||"UNKNOWN"
+      #qualified_var = "::#{module_name}::params::#{args[0]}"
+      component = lookupvar("component")||"UNKNOWN"
+      module_name = (component == "base" ? "::hdp2" : "::hdp2-#{component}")      
+      qualified_var = "#{module_name}::params::#{args[0]}"
+    end
+    val = lookupvar(qualified_var)
+    (val.nil? or val == :undefined) ? "" : val 
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_unique_id_and_date.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
new file mode 100644
index 0000000..01179e1
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_unique_id_and_date.rb
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
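+  # combines the node's ::uniqueid fact with the output of `date +"%M%d%y"`
+  # (minute, day, two-digit year as written) into an id<ID>_date<TS> string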
+  newfunction(:hdp_unique_id_and_date, :type => :rvalue) do 
+    id = lookupvar('::uniqueid')
+    date = `date +"%M%d%y"`.chomp
+    "id#{id}_date#{date}"
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_user.rb b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_user.rb
new file mode 100644
index 0000000..c60c7e2
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/lib/puppet/parser/functions/hdp_user.rb
@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+module Puppet::Parser::Functions
+  newfunction(:hdp_user, :type => :rvalue) do |args|
+    args = [args].flatten
+    user = args[0]
+    val = lookupvar("::hdp2::params::#{user}")
+    (val.nil? or val == :undefined) ? "" : val 
+  end
+end
diff --git a/hmc/puppet/modules/hdp2/manifests/.directory b/hmc/puppet/modules/hdp2/manifests/.directory
new file mode 100644
index 0000000..f30512d
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/.directory
@@ -0,0 +1,3 @@
+[Dolphin]
+Timestamp=2011,3,16,9,26,14
+ViewMode=1
diff --git a/hmc/puppet/modules/hdp2/manifests/configfile.pp b/hmc/puppet/modules/hdp2/manifests/configfile.pp
new file mode 100644
index 0000000..2da83da
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/configfile.pp
@@ -0,0 +1,85 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2::configfile(
+  $component,
+  $conf_dir = undef, #if this is undef then name is of form conf_dir/file_name
+  $owner = undef, 
+  $group = $hdp2::params::hadoop_user_group,
+  $mode = undef,
+  $size = 64, #32 or 64 bit (used to pick appropriate java_home)
+  $template_tag = undef,
+  $namenode_host = $hdp2::params::namenode_host,
+  $yarn_rm_host = $hdp2::params::yarn_rm_host,
+  $snamenode_host = $hdp2::params::snamenode_host,
+  $slave_hosts = $hdp2::params::slave_hosts,
+  $hbase_rs_hosts = $hdp2::params::hbase_rs_hosts,
+  $zookeeper_hosts = $hdp2::params::zookeeper_hosts,
+  $hbase_master_host = $hdp2::params::hbase_master_host,
+  $hcat_server_host = $hdp2::params::hcat_server_host,
+  $hive_server_host = $hdp2::params::hive_server_host,
+  $oozie_server = $hdp2::params::oozie_server,
+  $templeton_server_host = $hdp2::params::templeton_server_host,
+  $hcat_mysql_host = $hdp2::params::hcat_mysql_host,
+  $nagios_server_host = $hdp2::params::nagios_server_host,
+  $ganglia_server_host = $hdp2::params::ganglia_server_host,
+  $dashboard_host = $hdp2::params::dashboard_host,
+  $gateway_host = $hdp2::params::gateway_host,
+  $public_namenode_host = $hdp2::params::public_namenode_host,
+  $public_snamenode_host = $hdp2::params::public_snamenode_host,
+  $public_yarn_rm_host = $hdp2::params::public_yarn_rm_host,
+  $public_hbase_master_host = $hdp2::params::public_hbase_master_host,
+  $public_zookeeper_hosts = $hdp2::params::public_zookeeper_hosts,
+  $public_ganglia_server_host = $hdp2::params::public_ganglia_server_host,
+  $public_nagios_server_host = $hdp2::params::public_nagios_server_host,
+  $public_dashboard_host = $hdp2::params::public_dashboard_host,
+  $public_hive_server_host = $hdp2::params::public_hive_server_host,
+  $public_oozie_server = $hdp2::params::public_oozie_server,
+  $public_templeton_server_host = $hdp2::params::public_templeton_server_host
+) 
+{
+
+   if ($conf_dir == undef) {
+     $qualified_file_name = $name
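+     # $name carries the full path here; keep only the trailing file name for the template lookup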
+     $file_name = regsubst($name,'^.+/([^/]+$)','\1')
+   } else {
+     $qualified_file_name = "${conf_dir}/${name}"
+     $file_name = $name
+   }
+   if ($component == 'base') {
+     $module = 'hdp2'
+   } else {
+      $module = "hdp2-${component}"   
+   }
+
+   if ($template_tag == undef) {  
+     $template_name = "${module}/${file_name}.erb"
+   } else {
+     $template_name = "${module}/${file_name}-${template_tag}.erb"
+   }
+
+   file{ $qualified_file_name:
+     ensure  => present,
+     owner   => $owner,
+     group   => $group,
+     mode    => $mode,
+     content => template($template_name)
+  }
+}
diff --git a/hmc/puppet/modules/hdp2/manifests/init.pp b/hmc/puppet/modules/hdp2/manifests/init.pp
new file mode 100644
index 0000000..bab6a92
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/init.pp
@@ -0,0 +1,301 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2(
+  $service_state = undef,
+  $pre_installed_pkgs = undef
+)
+{
+  import 'params.pp'
+  include hdp2::params
+  
+  Exec { logoutput => 'on_failure' }
+ 
+  group { $hdp2::params::hadoop_user_group :
+    ensure => present
+  }
+
+  #TODO: possibly not needed; there also appears to be a Puppet bug involving this and LDAP
+  hdp2::user { $hdp2::params::hadoop_user:
+    gid => $hdp2::params::hadoop_user_group
+  }
+  Group[$hdp2::params::hadoop_user_group] -> Hdp2::User[$hdp2::params::hadoop_user] 
+  class { 'hdp2::snmp': service_state => 'running'}
+
+  class { 'hdp2::create_smoke_user': }
+
+  if ($pre_installed_pkgs != undef) {
+    class { 'hdp2::pre_install_pkgs': }
+  }
+
+  #turns off selinux
+  class { 'hdp2::set_selinux': }
+
+  if ($service_state != 'uninstalled') {
+    if ($hdp2::params::mapreduce_lzo_enabled == true) {
+      @hdp2::lzo::package{ 32:}
+      @hdp2::lzo::package{ 64:}
+    }
+  }
+
+  #TODO: treat consistently 
+  if ($service_state != 'uninstalled') {
+    if ($hdp2::params::mapreduce_snappy_enabled == true) {
+      include hdp2::snappy::package
+    }
+  }
+
+  Hdp2::Package<|title == 'hadoop 32'|> ->   Hdp2::Package<|title == 'hbase'|>
+  Hdp2::Package<|title == 'hadoop 64'|> ->   Hdp2::Package<|title == 'hbase'|>
+
+  #TODO: just for testing
+  class{ 'hdp2::iptables': 
+    ensure => stopped,
+  }
+
+  case $hdp2::params::hdp_os_type {
+    centos6, rhel6: {
+      hdp2::package{ 'glibc-rhel6':
+        ensure       => 'present',
+        size         => $size,
+        java_needed  => false,
+        lzo_needed   => false
+      }
+    }
+  }
+
+}
+
+class hdp2::pre_install_pkgs
+{
+  if ($service_state == 'installed_and_configured') {
+    hdp2::exec{ 'yum install $pre_installed_pkgs':
+       command => "yum install -y $pre_installed_pkgs"
+    }
+  } elsif ($service_state == 'uninstalled') {
+    hdp2::exec{ 'yum erase $pre_installed_pkgs':
+       command => "yum erase -y $pre_installed_pkgs"
+    }
+  }
+}
+
+class hdp2::create_smoke_user()
+{
+  $smoke_group = $hdp2::params::smoke_user_group
+  $smoke_user = $hdp2::params::smokeuser
+
+  group { $smoke_group :
+    ensure => present
+  }
+
+  hdp2::user { $smoke_user:}
+
+  $cmd = "usermod -g  $smoke_group  $smoke_user"
+  $check_group_cmd = "id -gn $smoke_user | grep $smoke_group"
+  hdp2::exec{ $cmd:
+     command => $cmd,
+     unless => $check_group_cmd
+  }
+ 
+  Group[$smoke_group] -> Hdp2::User[$smoke_user] -> Hdp2::Exec[$cmd] 
+}
+
+
+class hdp2::set_selinux()
+{
+ $cmd = "/bin/echo 0 > /selinux/enforce"
+ hdp2::exec{ $cmd:
+    command => $cmd,
+    unless => "head -n 1 /selinux/enforce | grep ^0$"
+  }
+}
+
+define hdp2::user(
+  $gid = $hdp2::params::hadoop_user_group,
+  $just_validate = undef
+)
+{
+  $user_info = $hdp2::params::user_info[$name]
+  if ($just_validate != undef) {
+    $just_val  = $just_validate
+  } elsif (($user_info == undef) or ("|${user_info}|" == '||')){ #the string test covers how different Puppet versions represent an unset value
+    $just_val = false
+  } else {
+    $just_val = $user_info[just_validate]
+  }
+  
+  if ($just_val == true) {
+    exec { "user ${name} exists":
+      command => "su - ${name} -c 'ls /dev/null' >/dev/null 2>&1",
+      path    => ['/bin']
+    }
+  } else {
+    user { $name:
+      ensure     => present,
+      managehome => true,
+      #gid        => $gid, #TODO either remove this to support LDAP env or fix it
+      shell      => '/bin/bash'
+    }
+  }
+}
+     
+define hdp2::directory(
+  $owner = $hdp2::params::hadoop_user,
+  $group = $hdp2::params::hadoop_user_group,
+  $mode  = undef,
+  $ensure = directory,
+  $force = undef,
+  $service_state = 'running'
+  )
+{
+ if (($service_state == 'uninstalled') and ($wipeoff_data == true)) {
+  file { $name :
+    ensure => absent,
+    owner  => $owner,
+    group  => $group,
+    mode   => $mode,
+    force  => $force
+   }
+  } elsif ($service_state != 'uninstalled') {
+  file { $name :
+    ensure => present,
+    owner  => $owner,
+    group  => $group,
+    mode   => $mode,
+    force  => $force
+   }
+  }
+}
+#TODO: check on -R flag and use of recurse
+define hdp2::directory_recursive_create(
+  $owner = $hdp2::params::hadoop_user,
+  $group = $hdp2::params::hadoop_user_group,
+  $mode = undef,
+  $context_tag = undef,
+  $ensure = directory,
+  $force = undef,
+  $service_state = 'running'
+  )
+{
+  hdp2::exec {"mkdir -p ${name}" :
+    command => "mkdir -p ${name}",
+    creates => $name
+  }
+  #to take care of setting ownership and mode
+  hdp2::directory { $name :
+    owner => $owner,
+    group => $group,
+    mode  => $mode,
+    ensure => $ensure,
+    force => $force,
+    service_state => $service_state
+  }
+  Hdp2::Exec["mkdir -p ${name}"] -> Hdp2::Directory[$name]
+}
+
+### helper to do exec
+define hdp2::exec(
+  $command,
+  $refreshonly = undef,
+  $unless = undef,
+  $onlyif = undef,
+  $path = $hdp2::params::exec_path,
+  $user = undef,
+  $creates = undef,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $initial_wait = undef,
+  $logoutput = 'on_failure',
+  $cwd = undef
+)
+{
+     
+  if (($initial_wait != undef) and ($initial_wait != "undef")) {
+    #passing in creates and unless so we don't have to wait if the condition has already been achieved
+    hdp2::wait { "service ${name}" : 
+      wait_time => $initial_wait,
+      creates   => $creates,
+      unless    => $unless,
+      onlyif    => $onlyif,
+      path      => $path
+    }
+  }
+  
+  exec { $name :
+    command     => $command,
+    refreshonly => $refreshonly,
+    path        => $path,
+    user        => $user,
+    creates     => $creates,
+    unless      => $unless,
+    onlyif      => $onlyif,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput,
+    cwd         => $cwd
+  }
+  
+  anchor{ "hdp2::exec::${name}::begin":} -> Exec[$name] -> anchor{ "hdp2::exec::${name}::end":} 
+  if (($initial_wait != undef) and ($initial_wait != "undef")) {
+    Anchor["hdp2::exec::${name}::begin"] -> Hdp2::Wait["service ${name}"] -> Exec[$name]
+  }
+}
+
+#### utilities for waits
+define hdp2::wait(
+  $wait_time,
+  $creates = undef,
+  $unless = undef,
+  $onlyif = undef,
+  $path = undef #used for unless
+)   
+{
+  exec { "wait ${name} ${wait_time}" :
+    command => "/bin/sleep ${wait_time}",
+    creates => $creates,
+    unless  => $unless,
+    onlyif  => $onlyif,
+    path    => $path
+  } 
+}
+
+#### artifact_dir
+define hdp2::artifact_dir()
+{
+  include hdp2_artifact_dir_shared
+}
+class hdp2_artifact_dir_shared()
+{
+  file{ $hdp2::params::artifact_dir:
+    ensure  => directory
+  }
+}
+
+##### temp
+
+class hdp2::iptables($ensure)
+{
+  #TODO: just temp so not considering things like saving firewall rules
+  service { 'iptables':
+    ensure => $ensure
+  }
+}
diff --git a/hmc/puppet/modules/hdp2/manifests/java/package.pp b/hmc/puppet/modules/hdp2/manifests/java/package.pp
new file mode 100644
index 0000000..bf37e8a
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/java/package.pp
@@ -0,0 +1,70 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2::java::package(
+  $size,
+  $include_artifact_dir = true
+)
+{
+    
+  include hdp2::params
+  
+  $jdk_bin = $hdp2::params::jdk_bins[$size]
+  $artifact_dir = $hdp2::params::artifact_dir
+  $jdk_location = $hdp2::params::jdk_location
+  $jdk_curl_target = "${artifact_dir}/${jdk_bin}"
+ 
+  
+  if ($size == "32") {
+    $java_home = $hdp2::params::java32_home
+  } else {
+    $java_home = $hdp2::params::java64_home
+  }
+  $java_exec = "${java_home}/bin/java"
+  $java_dir = regsubst($java_home,'/[^/]+$','')
+   
+  if ($include_artifact_dir == true) {
+    hdp2::artifact_dir{ "java::package::${name}": }
+  }
+  
+  $curl_cmd = "mkdir -p ${artifact_dir} ; curl -f --retry 10 ${jdk_location}/${jdk_bin} -o ${jdk_curl_target}"
+  exec{ "${curl_cmd} ${name}":
+    command => $curl_cmd,
+    creates => $jdk_curl_target,
+    path    => ["/bin","/usr/bin/"],
+    unless  => "test -e ${java_exec}"
+  }
+ 
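+  # unpack the self-extracting JDK into ${java_dir}; the piped "echo A" feeds the
+  # installer's interactive prompt and -noregister skips JDK registration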
+  $install_cmd = "mkdir -p ${java_dir} ; chmod +x ${jdk_curl_target}; cd ${java_dir} ; echo A | ${jdk_curl_target} -noregister > /dev/null 2>&1"
+  exec{ "${install_cmd} ${name}":
+    command => $install_cmd,
+    unless  => "test -e ${java_exec}",
+    path    => ["/bin","/usr/bin/"]
+  }
+ 
+  file { "${java_exec} ${name}":
+    ensure => present
+  }
+ 
+  anchor{"hdp2::java::package::${name}::begin":} -> Exec["${curl_cmd} ${name}"] ->  Exec["${install_cmd} ${name}"] -> File["${java_exec} ${name}"] -> anchor{"hdp2::java::package::${name}::end":}
+  if ($include_artifact_dir == true) {
+    Anchor["hdp2::java::package::${name}::begin"] -> Hdp2::Artifact_dir["java::package::${name}"] -> Exec["${curl_cmd} ${name}"]
+  }
+}
diff --git a/hmc/puppet/modules/hdp2/manifests/lzo/package.pp b/hmc/puppet/modules/hdp2/manifests/lzo/package.pp
new file mode 100644
index 0000000..a308b4b
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/lzo/package.pp
@@ -0,0 +1,46 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2::lzo::package()
+{
+  $size = $name
+
+
+  case $hdp2::params::hdp_os_type {
+    centos6, rhel6: {
+      $pkg_type = 'lzo-rhel6'
+    }
+    default: {
+      $pkg_type = 'lzo-rhel5'
+    }
+  }
+
+
+  hdp2::package {"lzo ${size}":
+    package_type  => "${pkg_type}", 
+    size          => $size,
+    java_needed   => false
+  }
+
+  $anchor_beg = "hdp2::lzo::package::${size}::begin"
+  $anchor_end = "hdp2::lzo::package::${size}::end"
+  anchor{$anchor_beg:} ->  Hdp2::Package["lzo ${size}"] -> anchor{$anchor_end:}
+}
+
diff --git a/hmc/puppet/modules/hdp2/manifests/namenode-conn.pp b/hmc/puppet/modules/hdp2/manifests/namenode-conn.pp
new file mode 100644
index 0000000..069b758
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/namenode-conn.pp
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2::namenode-conn($namenode_host)
+{
+  Hdp2::Configfile<||>{namenode_host => $namenode_host}
+}
diff --git a/hmc/puppet/modules/hdp2/manifests/package.pp b/hmc/puppet/modules/hdp2/manifests/package.pp
new file mode 100644
index 0000000..2c71aa8
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/package.pp
@@ -0,0 +1,100 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2::package(
+  $ensure = present,
+  $package_type = undef,
+  $size = 64,
+  $java_needed = true,
+  $lzo_needed = false,
+  $provider = yum
+  )
+{
+ 
+  $pt = $package_type ? {
+    undef  => $name,
+    default  => $package_type
+  }
+  
+  case $provider {
+    'yum': { 
+      hdp2::package::yum { $name:
+        ensure       => $ensure,
+        package_type => $pt,
+        size         => $size,
+        java_needed  => $java_needed,
+        lzo_needed   => $lzo_needed
+      }
+    }
+    default: {
+      hdp_fail("No support for provider ${provider}")
+    }
+  }
+}
+
+define hdp2::package::yum(
+  $ensure = present,
+  $package_type,
+  $size,
+  $java_needed,
+  $lzo_needed
+  )
+{
+    
+  include hdp2::params
+ 
+  $package_type_info = $hdp2::params::package_names[$package_type]
+  if hdp_is_empty($package_type_info) {
+    hdp_fail("Cannot find info about package type ${package_type}") 
+  }
+  $package_name = $package_type_info[$size]
+  if hdp_is_empty($package_name) {
+    hdp_fail("Cannot find package ${package_type} of size ${size}")
+  }
+  
+  if (($java_needed == true) and ($ensure == 'present')){
+    hdp2::java::package{ $name:
+      size                 => $size,
+      include_artifact_dir => true
+    }
+  }
+
+  if (($lzo_needed == true) and ($ensure == 'present')){
+    Hdp2::Lzo::Package<|title == $size|>
+  }
+
+  if ($ensure == 'uninstalled') {
+    $ensure_actual = 'purged'
+  } else {
+    $ensure_actual = $ensure
+  }
+  $tag = regsubst($name,' ','-',G)
+  package{ $package_name:
+    ensure   => $ensure_actual,
+    provider => yum,
+    tag      => $tag
+  }
+  anchor{ "hdp2::package::${name}::begin": } -> Package<|tag == $tag|> -> anchor{ "hdp2::package::${name}::end": }
+  
+  if (($java_needed == true) and ($ensure == 'present')) {
+   Anchor["hdp2::package::${name}::begin"] -> Hdp2::Java::Package[$name] -> Anchor["hdp2::package::${name}::end"] 
+  }
+}
+
diff --git a/hmc/puppet/modules/hdp2/manifests/params.pp b/hmc/puppet/modules/hdp2/manifests/params.pp
new file mode 100644
index 0000000..61b8c0c
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/params.pp
@@ -0,0 +1,364 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
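+# Central lookup of cluster-wide settings for the hdp2 modules: host
+# roles, service users, Java and compression locations, and the
+# package-name tables, all overridable via hdp_default().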
+class hdp2::params()
+{
+
+  ##### global state defaults ####
+  $cluster_service_state = hdp_default("cluster_service_state","running")
+  $cluster_client_state = hdp_default("cluster_client_state","installed_and_configured")
+
+  ###### hostnames
+  $namenode_host = hdp_default("namenode_host")
+  $snamenode_host = hdp_default("snamenode_host")
+  $yarn_rm_host = hdp_default("yarn_rm_host")
+  $slave_hosts = hdp_default("slave_hosts")
+  
+  $zookeeper_hosts = hdp_default("zookeeper_hosts")
+
+  $hbase_master_host = hdp_default("hbase_master_host")
+  $hbase_rs_hosts = hdp_default("hbase_rs_hosts",$slave_hosts) #if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+
+  $hive_server_host = hdp_default("hive_server_host")
+  $oozie_server =  hdp_default("oozie_server")
+  $templeton_server_host = hdp_default("templeton_server_host")
+  $gateway_host = hdp_default("gateway_host")
+  
+  $nagios_server_host = hdp_default("nagios_server_host")
+  $ganglia_server_host = hdp_default("ganglia_server_host")
+  
+  $dashboard_host = hdp_default("dashboard_host")
+
+  $hdp_os = $::operatingsystem
+  $hdp_os_version = $::operatingsystemrelease
+  case $::operatingsystem {
+    centos: {
+      case $::operatingsystemrelease {
+        /^5\..+$/: { $hdp_os_type = "centos5" }
+        /^6\..+$/: { $hdp_os_type = "centos6" }
+      }
+    }
+    redhat: {
+      case $::operatingsystemrelease {
+        /^5\..+$/: { $hdp_os_type = "rhel5" }
+        /^6\..+$/: { $hdp_os_type = "rhel6" }
+      }
+    }
+    default: {
+      hdp_fail("No support for os  ${hdp_os} ${hdp_os_version}")
+    }
+  }
+
+  if ($hostAttributes != undef) {
+    $public_namenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$namenode_host)
+    $public_snamenode_host = hdp_host_attribute($hostAttributes,"publicfqdn",$snamenode_host)
+    $public_yarn_rm_host = hdp_host_attribute($hostAttributes,"publicfqdn",$yarn_rm_host)
+    $public_hbase_master_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hbase_master_host)
+    $public_zookeeper_hosts = hdp_host_attribute($hostAttributes,"publicfqdn",$zookeeper_hosts)
+    $public_ganglia_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$ganglia_server_host)
+    $public_nagios_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$nagios_server_host)
+    $public_dashboard_host = hdp_host_attribute($hostAttributes,"publicfqdn",$dashboard_host)
+    $public_hive_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$hive_server_host)
+    $public_oozie_server = hdp_host_attribute($hostAttributes,"publicfqdn",$oozie_server)
+    $public_templeton_server_host = hdp_host_attribute($hostAttributes,"publicfqdn",$templeton_server_host)
+  } else {
+    $public_namenode_host = hdp_default("namenode_host")
+    $public_snamenode_host = hdp_default("snamenode_host")
+    $public_yarn_rm_host = hdp_default("yarn_rm_host")
+    $public_hbase_master_host = hdp_default("hbase_master_host")
+    $public_zookeeper_hosts = hdp_default("zookeeper_hosts")
+    $public_ganglia_server_host = hdp_default("ganglia_server_host")
+    $public_nagios_server_host = hdp_default("nagios_server_host")
+    $public_dashboard_host = hdp_default("dashboard_host")
+    $public_hive_server_host = hdp_default("hive_server_host")
+    $public_oozie_server = hdp_default("oozie_server")
+    $public_templeton_server_host = hdp_default("templeton_server_host")
+  }
+
+
+  ############ users
+  $user_info = hdp_default("user_info",{})
+
+  $hdfs_user = hdp_default("hdfs_user","hdfs")
+  $yarn_user = hdp_default("yarn_user","yarn")
+  $mapred_user = hdp_default("mapred_user","mapred")
+
+  $zk_user = hdp_default("zk_user","zookeeper") 
+  $hbase_user = hdp_default("hbase_user","hbase")
+
+  $hive_user = hdp_default("hive_user","hive")
+  $hcat_user = hdp_default("hcat_user","hcat")
+
+  $oozie_user = hdp_default("oozie_user","oozie")
+  $templeton_user = hdp_default("templeton_user","templeton")
+
+  $gmetad_user = hdp_default("gmetad_user","nobody")
+  $gmond_user = hdp_default("gmond_user","nobody")
+
+  $smokeuser = hdp_default("smokeuser","ambari_qa")
+  $smoke_user_group = hdp_default("smoke_user_group","users")
+
+  #because of a Puppet user resource issue, make sure $hadoop_user is different from $hadoop_user_group
+  $hadoop_user = hdp_default("hadoop_user", "hadoop_deploy")
+  $hadoop_user_group = hdp_default("hadoop_user_group","hadoop")
+
+  $ganglia_enabled = hdp_default("ganglia_enabled",true) 
+
+  #TODO: either remove or make conditional on ec2
+  $host_address = undef 
+
+  ##### java 
+  $java32_home = hdp_default("java32_home","/usr/jdk32/jdk1.6.0_31")
+  $java64_home = hdp_default("java64_home","/usr/jdk64/jdk1.6.0_31")
+  
+  $wipeoff_data =  hdp_default("wipeoff_data",false) 
+
+  $jdk_location = hdp_default("jdk_location","http://download.oracle.com/otn-pub/java/jdk/6u31-b04")
+  $jdk_bins = hdp_default("jdk_bins",{
+    32 => "jdk-6u31-linux-i586.bin",
+    64 => "jdk-6u31-linux-x64.bin"
+  })
+  
+  #####
+  $hadoop_home = hdp_default("hadoop_home","/usr")
+  $hadoop_lib_home = hdp_default("hadoop_lib_home","/usr/lib/hadoop/lib")
+
+  #### Not needed once Hadoop and Bigtop auto-detect the YARN home
+  ## Temporary workaround; TODO: remove
+  $hadoop_yarn_home = hdp_default("hadoop_yarn_home","/usr")
+
+  #####compression related
+
+  $mapreduce_lzo_enabled = hdp_default("mapreduce_lzo_enabled",false)
+  $mapreduce_snappy_enabled = hdp_default("mapreduce_snappy_enabled",false)
+  
+  $lzo_compression_so_dirs = {
+    32 => "${hadoop_lib_home}/native/Linux-i386-32/",
+    64 => "${hadoop_lib_home}/native/Linux-amd64-64/"
+  }
+  
+  $snappy_so_src_dir = {
+    32 => "${hadoop_home}/lib",
+    64 => "${hadoop_home}/lib64"
+  }
+  $snappy_compression_so_dirs = {
+    32 => "${hadoop_lib_home}/native/Linux-i386-32/",
+    64 => "${hadoop_lib_home}/native/Linux-amd64-64/"
+  }
+
+  $lzo_tar_name = hdp_default("lzo_tar_name","hadoop-lzo-0.5.0")
+  
+  $snappy_so = hdp_default("snappy_so","libsnappy.so")
+  #####
+ 
+  $exec_path = ["/bin","/usr/bin", "/usr/sbin"]
+
+   #### params used on multiple modules
+  $dfs_data_dir = hdp_default("hadoop/hdfs-site/dfs_data_dir","/tmp/hadoop-hdfs/dfs/data")
+
+  ### artifact dir
+  $artifact_dir = hdp_default("artifact_dir","/tmp/HDP2-artifacts/")
+
+  ### artifacts download url ##
+  $apache_artifacts_download_url = hdp_default("apache_artifacts_download_url","https://s3.amazonaws.com/dev.hortonworks.com/HDP-1.0.11/repos/centos5/tars")
+  $gpl_artifacts_download_url = hdp_default("gpl_artifacts_download_url","https://s3.amazonaws.com/dev.hortonworks.com/ARTIFACTS") 
+
+  ### related to package resources  
+ 
+  $package_names = {
+   # hadoop => {
+   #   32 => 'hadoop.i386',
+   #   64 => 'hadoop.x86_64'
+   # },
+   # zookeeper => {
+   #   64 => 'zookeeper.x86_64'
+   # },
+   # hbase => {
+   #   64 => 'hbase.x86_64'
+   # },
+   # hcat-server => {
+   #   64 => 'hcatalog-server.x86_64'
+   # },
+   # hcat-base => {
+   #   64 => 'hcatalog.x86_64'
+   # },
+   # pig => {
+   #   32 => 'pig.i386'
+   # },
+    ganglia-monitor => {
+      64 => 'ganglia-gmond-3.2.0'
+    },
+    ganglia-server => {
+      64 => ['ganglia-gmetad-3.2.0']
+    },
+    ganglia-gweb => {
+      64 => 'gweb'
+    },
+    ganglia-hdp-gweb-addons => {
+      64 => 'hdp_mon_ganglia_addons'
+    },
+    glibc-rhel6 => {
+      32 => ['glibc','glibc.i686'],
+      64 => ['glibc','glibc.i686']
+    },
+    nagios-addons => {
+      64 => 'hdp_mon_nagios_addons'
+    },
+    nagios-server => {
+      64 => 'nagios-3.2.3'
+    },
+    nagios-plugins => {
+      64 => 'nagios-plugins-1.4.9'
+    },
+    nagios-fping => {
+      64 =>'fping'
+    },
+    nagios-php-pecl-json => {
+      64 => 'php-pecl-json.x86_64'
+    },
+    snmp => {
+      64 => ['net-snmp','net-snmp-utils'],
+    },
+    dashboard => {
+      64 => 'hdp_mon_dashboard'
+    },
+    # sqoop => {
+    #   32 => 'sqoop-1.4.1-1.noarch'
+    #},
+    templeton => {
+       32 => 'templeton',
+       64 => 'templeton'
+    },
+    oozie-client => {
+      64 => 'oozie-client.noarch'
+    },
+    oozie-server => {
+      64 => 'oozie.noarch'
+    },
+    lzo-rhel5 => {
+      32 => ['lzo','lzo.i386','lzo-devel','lzo-devel.i386'],
+      64 => ['lzo','lzo.i386','lzo-devel','lzo-devel.i386']
+    },
+    lzo-rhel6 => {
+      32 => ['lzo','lzo.i686','lzo-devel','lzo-devel.i686'],
+      64 => ['lzo','lzo.i686','lzo-devel','lzo-devel.i686']
+    },
+    #TODO: make these two entries consistent in how they handle the 64/32-bit cases
+    snappy => {
+      32 =>  ['snappy','snappy-devel'],
+      64 => ['snappy','snappy-devel']
+    },
+    mysql => {
+      32 =>  ['mysql','mysql-server']
+    },
+    mysql-connector => {
+      64 =>  ['mysql-connector-java']
+    },
+    extjs => {
+      64 =>  ['extjs-2.2-1']
+    },
+    templeton-tar-hive => {
+      64 => ['templeton-tar-hive-0.0.1.15-1']
+    },
+    templeton-tar-pig => {
+      64 => ['templeton-tar-pig-0.0.1.15-1']
+    }
+  }
+  $packages = 'bigtop' 
+  if ($packages == 'hdp') {
+    $package_names[hadoop] = { 32 => ['hadoop.i386'], 64 => ['hadoop.x86_64']}
+    $mapred_smoke_test_script = "/usr/sbin/hadoop-validate-setup.sh"
+    $hadoop_bin = "/usr/sbin"
+    $hadoop_conf_dir = "/etc/hadoop"
+    $zk_conf_dir = "/etc/zookeeper"
+    $hbase_conf_dir = "/etc/hbase"
+    $sqoop_conf_dir = "/etc/sqoop"
+    $pig_conf_dir = "/etc/pig"
+    $oozie_conf_dir = "/etc/oozie"
+    $hadoop_jar_location = "/usr/share/hadoop"
+    $mapred_examples_jar = "${hadoop_jar_location}/hadoop-mapreduce-examples-*.jar"
+    $hbase_daemon_script = "/usr/bin/hbase-daemon.sh"
+    $use_32_bits_on_slaves = false
+    $package_names[zookeeper] = {64 => 'zookeeper.x86_64'}
+    $package_names[hbase] = {64 => 'hbase.x86_64'}
+    $package_names[sqoop] = {32 => 'sqoop-1.4.1-1.noarch'}
+    $package_names[pig] = { 32 => 'pig.i386'}
+    $package_names[hcat-server] = { 64 => 'hcatalog-server.x86_64'}
+    $package_names[hcat-base] = { 64 => 'hcatalog.x86_64'}
+    $zk_bin = '/usr/sbin'
+    $zk_smoke_test_script = '/usr/bin/zkCli.sh'
+    $update_zk_shell_files = false
+
+    $hcat_server_host = hdp_default("hcat_server_host")
+    $hcat_mysql_host = hdp_default("hcat_mysql_host")
+
+  } elsif ($packages == 'bigtop') {  
+
+    $package_names[hadoop] = {32 => ['hadoop', 'hadoop-hdfs', 'hadoop-yarn', 'hadoop-mapreduce', 'hadoop-client', 'hadoop-libhdfs.i386'], 64 => ['hadoop', 'hadoop-hdfs', 'hadoop-yarn', 'hadoop-mapreduce', 'hadoop-client', 'hadoop-libhdfs.x86_64']}
+# TODO - Add lzo, pipes etc.
+# $package_names[hadoop] = {32 => ['hadoop','hadoop-hdfs','hadoop-yarn','hadoop-mapreduce','hadoop-libhdfs.i386','hadoop-native.i386','hadoop-pipes.i386','hadoop-sbin.i386','hadoop-lzo', 'hadoop-lzo-native.i386'], 64 => ['hadoop','hadoop-hdfs','hadoop-yarn','hadoop-mapreduce','hadoop-libhdfs','hadoop-native','hadoop-pipes','hadoop-sbin','hadoop-lzo', 'hadoop-lzo-native']}
+   
+    $mapred_smoke_test_script = "/usr/lib/hadoop/sbin/hadoop-validate-setup.sh"
+    $hadoop_bin = "/usr/lib/hadoop/bin"
+    $hadoop_sbin = "/usr/lib/hadoop/sbin"
+    $yarn_sbin = "/usr/lib/hadoop-yarn/sbin"
+    $mapred_sbin = "/usr/lib/hadoop-mapreduce/sbin"
+    $hadoop_conf_dir = "/etc/hadoop/conf"
+    $zk_conf_dir = "/etc/zookeeper/conf"
+    $hbase_conf_dir = "/etc/hbase/conf/"
+    $sqoop_conf_dir = "/usr/lib/sqoop/conf"
+    $pig_conf_dir = "/etc/pig/conf"
+    $oozie_conf_dir = "/etc/oozie/conf"
+    $hive_conf_dir = "/etc/hive/conf"
+    $hcat_conf_dir = "/etc/hcatalog/conf"
+    $hadoop_jar_location = "/usr/lib/hadoop-mapreduce"
+    $hadoop_client_jar_location = "/usr/lib/hadoop/client"
+    $mapred_examples_jar = "${hadoop_jar_location}/hadoop-mapreduce-examples-*.jar"
+    $hbase_daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+    $use_32_bits_on_slaves = false
+    $package_names[zookeeper] = {64 => ['zookeeper']}
+    $package_names[hbase] = {64 => ['hbase']}
+    $package_names[sqoop] = {32 => ['sqoop'], 64 => ['sqoop']}
+    $package_names[pig] = {32 => ['pig.noarch'], 64 => ['pig.noarch']}
+    $package_names[hcat] = {32 => ['hcatalog'], 64 => ['hcatalog']}
+    $package_names[hive] = {64 => ['hive']}
+    $zk_bin = '/usr/lib/zookeeper/bin'
+    $zk_smoke_test_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+    $update_zk_shell_files = false
+
+    $hive_mysql_host = hdp_default("hive_mysql_host","localhost")
+
+    $hcat_server_host = hdp_default("hive_server_host")
+    $hcat_mysql_host = hdp_default("hive_mysql_host")
+
+  }
+ 
+###### snmp
+
+  $snmp_conf_dir = hdp_default("snmp_conf_dir","/etc/snmp/")
+  $snmp_source = hdp_default("snmp_source","0.0.0.0/0") ##TODO: open to all sources for testing only; restrict before production use
+  $snmp_community = hdp_default("snmp_community","hadoop")
+
+###### aux
+  #used by ganglia monitor to tell what components and services are present
+  $component_exists = {} 
+  $service_exists = {} 
+}
+
diff --git a/hmc/puppet/modules/hdp2/manifests/snappy/package.pp b/hmc/puppet/modules/hdp2/manifests/snappy/package.pp
new file mode 100644
index 0000000..f667f94
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/snappy/package.pp
@@ -0,0 +1,52 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
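+# Installs the snappy packages and symlinks libsnappy.so into the Hadoop
+# native library directories for both the 32- and 64-bit layouts.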
+class hdp2::snappy::package()
+{
+ hdp2::package {'snappy':
+    package_type  => 'snappy',
+    java_needed   => false
+  }
+  
+  hdp2::snappy::package::ln{ 64:} 
+  hdp2::snappy::package::ln{ 32:} 
+  
+  anchor{'hdp2::snappy::package::begin':} ->  Hdp2::Package['snappy'] -> Hdp2::Snappy::Package::Ln<||> -> anchor{'hdp2::snappy::package::end':}
+}
+
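+# $name is the word size (32/64); creates a libsnappy.so symlink in the
+# matching Hadoop native directory when source and target paths differ.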
+define hdp2::snappy::package::ln()
+{
+  $size = $name
+  $hadoop_home = $hdp2::params::hadoop_home  
+  $snappy_so = $hdp2::params::snappy_so
+  $so_target_dir = $hdp2::params::snappy_compression_so_dirs[$size]
+  $so_target = "${so_target_dir}/libsnappy.so"
+  $so_src_dir = $hdp2::params::snappy_so_src_dir[$size]
+  $so_src = "${so_src_dir}/${snappy_so}" 
+  
+  if ($so_target != $so_src) { 
+    $ln_cmd = "mkdir -p $so_target_dir; ln -sf ${so_src} ${so_target}"
+    hdp2::exec{ "hdp2::snappy::package::ln ${name}":
+      command => $ln_cmd,
+      unless  => "test -f ${so_target}",
+      creates => $so_target
+    }
+  }
+}
diff --git a/hmc/puppet/modules/hdp2/manifests/snmp.pp b/hmc/puppet/modules/hdp2/manifests/snmp.pp
new file mode 100644
index 0000000..eb6dedc
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/snmp.pp
@@ -0,0 +1,48 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
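+# Installs net-snmp, renders snmpd.conf from the template, and keeps the
+# snmpd service in the requested state.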
+class hdp2::snmp(
+  $service_state = 'running'
+)
+{
+  include hdp2::params
+
+  hdp2::package {'snmp':}
+ 
+  hdp2::snmp-configfile {'snmpd.conf': 
+    notify => Service['snmpd']    
+  }
+
+  service { 'snmpd' :
+    ensure => $service_state
+  }
+  
+   anchor{'hdp2::snmp::begin':} -> Hdp2::Package['snmp'] -> Hdp2::Snmp-configfile<||> -> Service['snmpd'] -> anchor{'hdp2::snmp::end':}
+}
+
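+# Renders the named file under $snmp_conf_dir from the hdp2 configfile
+# template, owned by root.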
+define hdp2::snmp-configfile()
+{ 
+  hdp2::configfile { "${hdp2::params::snmp_conf_dir}/${name}":
+    component     => 'base',
+    owner         => root,
+    group         => root
+  }
+}
+
diff --git a/hmc/puppet/modules/hdp2/manifests/testing_env_patch.pp b/hmc/puppet/modules/hdp2/manifests/testing_env_patch.pp
new file mode 100644
index 0000000..3ca1d66
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/manifests/testing_env_patch.pp
@@ -0,0 +1,53 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
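+# Test-environment patch only: stops iptables, disables SELinux
+# enforcement, optionally installs helper packages, and moves existing
+# yum repo files to /tmp/repos unless the HDP repo file is already present.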
+class hdp2::testing_env_patch()
+{
+  $cmd = "mkdir /tmp/repos; mv /etc/yum.repos.d/* /tmp/repos"
+  $repo_target = "/etc/yum.repos.d/${hdp2::params::hdp_yum_repo}"
+
+  anchor { 'hdp2::testing_env_patch::begin' :}
+  class{ 'hdp2::iptables': 
+    ensure => stopped,
+    require => Anchor['hdp2::testing_env_patch::begin']
+  }
+  exec { '/bin/echo 0 > /selinux/enforce':
+    require => Class['hdp2::iptables']
+  }
+  hdp2::testing_env_patch::packages { 'common' :
+    require => Exec['/bin/echo 0 > /selinux/enforce']
+  }
+  hdp2::exec { $cmd :
+    command => $cmd,
+    unless => "test -e ${repo_target}",
+    require => Hdp2::Testing_env_patch::Packages['common']
+  }  
+  anchor { 'hdp2::testing_env_patch::end' :
+    require => Exec[$cmd]
+  }
+}
+
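+# Installs the listed helper packages only when $needed is set to true.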
+define hdp2::testing_env_patch::packages(
+  $needed = false)
+{
+ if ($needed == true) {
+   package { ['perl-Digest-HMAC','perl-Socket6','perl-Crypt-DES','xorg-x11-fonts-Type1','libdbi'] :} 
+ }
+}
diff --git a/hmc/puppet/modules/hdp2/templates/hdp103.repo.erb b/hmc/puppet/modules/hdp2/templates/hdp103.repo.erb
new file mode 100644
index 0000000..c535b10
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/templates/hdp103.repo.erb
@@ -0,0 +1,6 @@
+[HDP-1.0.7]
+name=Hortonworks Data Platform Version 1.0.7
+baseurl = <%=scope.function_hdp_template_var("hdp_yum_repo_base_url")%>
+enabled=1
+gpgcheck=0
+priority=1
diff --git a/hmc/puppet/modules/hdp2/templates/snmpd.conf.erb b/hmc/puppet/modules/hdp2/templates/snmpd.conf.erb
new file mode 100644
index 0000000..8a93b53
--- /dev/null
+++ b/hmc/puppet/modules/hdp2/templates/snmpd.conf.erb
@@ -0,0 +1,48 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+com2sec notConfigUser  <%=scope.function_hdp_template_var("snmp_source")%>   <%=scope.function_hdp_template_var("snmp_community")%>
+group   notConfigGroup v1           notConfigUser
+group   notConfigGroup v2c           notConfigUser
+view    systemview    included   .1
+access  notConfigGroup ""      any       noauth    exact  systemview none none
+
+syslocation Hadoop 
+syscontact HadoopMaster 
+dontLogTCPWrappersConnects yes
+
+###############################################################################
+# disk checks
+
+disk / 10000
+
+
+###############################################################################
+# load average checks
+#
+
+# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
+#
+# 1MAX:   If the 1 minute load average is above this limit at query
+#         time, the errorFlag will be set.
+# 5MAX:   Similar, but for 5 min average.
+# 15MAX:  Similar, but for 15 min average.
+
+# Check for loads:
+#load 12 14 14
+
diff --git a/hmc/tests/php/orchestrator/test.php b/hmc/tests/php/orchestrator/test.php
index b28c94e..f5e7e08 100644
--- a/hmc/tests/php/orchestrator/test.php
+++ b/hmc/tests/php/orchestrator/test.php
@@ -52,7 +52,7 @@
   private $hbase_regionserver;
 
   function __construct($puppet) {
-    parent::__construct("./test.db", "DataCluster", $puppet);
+    parent::__construct("/homes/hortonmu/HMC/src/php/db/test.db", "DataCluster", $puppet);
 
     $this->namenode = new ServiceComponent($clusterName, "namenode", "hdfs", State::UNINSTALLED, $this, $puppet, FALSE);
     $this->secondary_namenode = new ServiceComponent($clusterName, "secondary_namenode", "hdfs", State::UNINSTALLED, $this, $puppet, FALSE);
@@ -217,7 +217,7 @@
 */
 
 $puppet = new MockPuppetInvoker();
-$db = new OrchestratorDB("./test.db", "DataCluster", $puppet);
+$db = new OrchestratorDB("/homes/hortonmu/HMC/src/php/db/test.db", "DataCluster", $puppet);
 $cluster = new Cluster($clusterName, $db, $puppet);
 $t0 = new Transaction(0, 0, 0);
 print_r($cluster->deployHDP($t0));
diff --git a/mon_dashboard/package/rpm/create_dashboard_rpm.sh b/mon_dashboard/package/rpm/create_dashboard_rpm.sh
index 6cbd5df..177cebe 100755
--- a/mon_dashboard/package/rpm/create_dashboard_rpm.sh
+++ b/mon_dashboard/package/rpm/create_dashboard_rpm.sh
@@ -27,7 +27,7 @@
 fi
 
 if [[ -z "${VERSION}" ]]; then
-  VERSION="0.0.2.14"
+  VERSION="0.0.2.15"
 fi
 
 if [[ -z "${RELEASE}" ]]; then
diff --git a/mon_dashboard/package/rpm/create_ganglia_addon_rpm.sh b/mon_dashboard/package/rpm/create_ganglia_addon_rpm.sh
index e6548df..3dc5afc 100755
--- a/mon_dashboard/package/rpm/create_ganglia_addon_rpm.sh
+++ b/mon_dashboard/package/rpm/create_ganglia_addon_rpm.sh
@@ -27,7 +27,7 @@
 fi
 
 if [[ -z "${VERSION}" ]]; then
-  VERSION="0.0.2.14"
+  VERSION="0.0.2.15"
 fi
 
 if [[ -z "${RELEASE}" ]]; then
diff --git a/mon_dashboard/package/rpm/create_nagios_addon_rpm.sh b/mon_dashboard/package/rpm/create_nagios_addon_rpm.sh
index 80ed915..e0874d0 100755
--- a/mon_dashboard/package/rpm/create_nagios_addon_rpm.sh
+++ b/mon_dashboard/package/rpm/create_nagios_addon_rpm.sh
@@ -27,7 +27,7 @@
 fi
 
 if [[ -z "${VERSION}" ]]; then
-  VERSION="0.0.2.14"
+  VERSION="0.0.2.15"
 fi
 
 if [[ -z "${RELEASE}" ]]; then
diff --git a/mon_dashboard/package/rpm/hdp_mon_dashboard.spec b/mon_dashboard/package/rpm/hdp_mon_dashboard.spec
index 25550f9..e2ea389 100644
--- a/mon_dashboard/package/rpm/hdp_mon_dashboard.spec
+++ b/mon_dashboard/package/rpm/hdp_mon_dashboard.spec
@@ -24,17 +24,21 @@
 
 Summary: HDP Monitoring Dashboard Frontend
 Name: hdp_mon_dashboard
-Version: 0.0.2.14
-URL: http://incubator.apache.org/projects/ambari.html
+Version: 0.0.2.15
+URL: http://hortonworks.com
 Release: 1
 License: Apache License, Version 2.0
-Vendor: Apache Software Foundation (ambari-dev@incubator.apache.org)
+Vendor: Hortonworks <ambari-group@hortonworks.com>
 Group: System Environment/Base
 Source: %{name}-%{version}.tar.gz
 Buildroot: %{_tmppath}/%{name}-%{version}-buildroot
 Requires: php >= 5, httpd
 %define web_prefixdir %{_prefix}/share/hdp/dashboard
+%if 0%{?suse_version}
+%define httpd_confdir %{_sysconfdir}/apache2/conf.d
+%else
 %define httpd_confdir %{_sysconfdir}/httpd/conf.d
+%endif
 BuildArchitectures: noarch
 
 %description
@@ -55,16 +59,29 @@
 
 %__cp -rf dataServices/* $RPM_BUILD_ROOT/%{web_prefixdir}/dataServices/
 %__cp -rf ui/* $RPM_BUILD_ROOT/%{web_prefixdir}/ui/
-echo "Alias /hdp %{_prefix}/share/hdp" > $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+
+echo "Alias /hdp %{_prefix}/share/hdp" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+echo "<Directory /usr/share/hdp>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+echo "  Options None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+echo "  AllowOverride None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+echo "  Order allow,deny" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+echo "  Allow from all" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
+echo "</Directory>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_dashboard.conf
 
 %files
 %defattr(-,root,root)
-%{web_prefixdir}/*
 %{httpd_confdir}/hdp_mon_dashboard.conf
+%if 0%{?suse_version}
+%defattr(-,wwwrun,www)
+%{web_prefixdir}/*
+%else
+%defattr(-,root,root)
+%{web_prefixdir}/*
+%endif
 
 %clean
 %__rm -rf $RPM_BUILD_ROOT
 
 %changelog
-* Thu Jun 07 2012 Ambari <ambari-dev@incubator.apache.org>
+* Fri Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
 - Initial version
diff --git a/mon_dashboard/package/rpm/hdp_mon_ganglia_addons.spec b/mon_dashboard/package/rpm/hdp_mon_ganglia_addons.spec
index b711231..186ad0d 100644
--- a/mon_dashboard/package/rpm/hdp_mon_ganglia_addons.spec
+++ b/mon_dashboard/package/rpm/hdp_mon_ganglia_addons.spec
@@ -24,17 +24,23 @@
 
 Summary: Ganglia Add-ons for HDP Monitoring Dashboard
 Name: hdp_mon_ganglia_addons
-Version: 0.0.2.14
-URL: http://incubator.apache.org/projects/ambari.html
+Version: 0.0.2.15
+URL: http://hortonworks.com
 Release: 1
 License: Apache License, Version 2.0
-Vendor: Apache Software Foundation (ambari-dev@incubator.apache.org)
+Vendor: Hortonworks <ambari-group@hortonworks.com>
 Group: System Environment/Base
 Source: %{name}-%{version}.tar.gz
 Buildroot: %{_tmppath}/%{name}-%{version}-buildroot
 Requires: gweb >= 2.2
+
+%if 0%{?suse_version}
+%define graphd_dir /srv/www/htdocs/ganglia/graph.d/
+%else
 %define graphd_dir /var/www/html/ganglia/graph.d/
+%endif
 %define gconf_dir /var/lib/ganglia/conf/
+
 BuildArchitectures: noarch
 
 %description
@@ -65,5 +71,5 @@
 %__rm -rf $RPM_BUILD_ROOT
 
 %changelog
-* Thu Jun 07 2011 Ambari <ambari-dev@incubator.apache.org>
+* Fri Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
 - Initial version
diff --git a/mon_dashboard/package/rpm/hdp_mon_nagios_addons.spec b/mon_dashboard/package/rpm/hdp_mon_nagios_addons.spec
index 6f54e9b..b44fbe9 100644
--- a/mon_dashboard/package/rpm/hdp_mon_nagios_addons.spec
+++ b/mon_dashboard/package/rpm/hdp_mon_nagios_addons.spec
@@ -24,18 +24,22 @@
 
 Summary: Nagios Add-ons for HDP Monitoring Dashboard
 Name: hdp_mon_nagios_addons
-Version: 0.0.2.14
-URL: http://incubator.apache.org/projects/ambari.html
+Version: 0.0.2.15
+URL: http://hortonworks.com
 Release: 1
 License: Apache License, Version 2.0
-Vendor: Apache Software Foundation (ambari-dev@incubator.apache.org)
+Vendor: Hortonworks <ambari-group@hortonworks.com>
 Group: System Environment/Base
 Source: %{name}-%{version}.tar.gz
 Buildroot: %{_tmppath}/%{name}-%{version}-buildroot
 Requires: nagios, nagios-plugins, php >= 5
 %define nagioshdpscripts_dir %{_prefix}/share/hdp/nagios
 %define nagiosplugin_dir %{_libdir}/nagios/plugins
+%if 0%{?suse_version}
+%define httpd_confdir %{_sysconfdir}/apache2/conf.d
+%else
 %define httpd_confdir %{_sysconfdir}/httpd/conf.d
+%endif
 BuildArchitectures: noarch
 
 %description
@@ -56,7 +60,13 @@
 
 %__cp -rf scripts/* $RPM_BUILD_ROOT/%{nagioshdpscripts_dir}/
 %__cp -rf plugins/* $RPM_BUILD_ROOT/%{nagiosplugin_dir}/
-echo "Alias /hdp %{_prefix}/share/hdp" > $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "Alias /hdp %{_prefix}/share/hdp" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "<Directory /usr/share/hdp>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  Options None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  AllowOverride None" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  Order allow,deny" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "  Allow from all" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
+echo "</Directory>" >> $RPM_BUILD_ROOT/%{httpd_confdir}/hdp_mon_nagios_addons.conf
 
 %files
 %defattr(-,root,root)
@@ -68,5 +78,5 @@
 %__rm -rf $RPM_BUILD_ROOT
 
 %changelog
-* Thu Jun 07 2012 Ambari <ambari-dev@incubator.apache.org>
+* Fri Feb 17 2011 Hortonworks <ambari-group@hortonworks.com>
 - Initial version
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json
index f1dc1db..6114111 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_hdfs_io_report.json
@@ -4,7 +4,7 @@
    "title" : "HDFS I/O",
    "vertical_label" : "Bytes/Sec",
    "series" : [
-      { "metric": "dfs.datanode.bytes_written", "color": "ff0000", "label": "Bytes Written/Sec", "line_width": "2", "type": "line" },
-      { "metric": "dfs.datanode.bytes_read", "color": "0000ff", "label": "Bytes Read/Sec", "line_width": "2", "type": "line" }
+      { "metric": "dfs.datanode.BytesWritten", "color": "ff0000", "label": "Bytes Written/Sec", "line_width": "2", "type": "line" },
+      { "metric": "dfs.datanode.BytesRead", "color": "0000ff", "label": "Bytes Read/Sec", "line_width": "2", "type": "line" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json
index 2bc8ca0..82a14b3 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_gc_report.json
@@ -4,7 +4,7 @@
    "title" : "Time spent in Garbage Collection",
    "vertical_label" : "Milliseconds",
    "series" : [
-      { "metric": "jvm.metrics.gcTimeMillis", "color": "ff0000", "label": "Time Spent", 
+      { "metric": "jvm.JvmMetrics.GcTimeMillis", "color": "ff0000", "label": "Time Spent", 
         "line_width": "2", "type": "stack" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json
index 2e2b238..4d1d93a 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_mem_heap_used_report.json
@@ -4,7 +4,7 @@
    "title" : "JVM Heap Memory Used",
    "vertical_label" : "MB",
    "series" : [
-      { "metric": "jvm.metrics.memHeapUsedM", "color": "ff0000", "label": "Heap Memory Used", 
+      { "metric": "jvm.JvmMetrics.MemHeapUsedM", "color": "ff0000", "label": "Heap Memory Used", 
         "line_width": "2", "type": "line" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json
index 9734587..860cb5b 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_jvm_threads_report.json
@@ -4,13 +4,13 @@
    "title" : "JVM Threads Status",
    "vertical_label" : "Number Of Threads",
    "series" : [
-      { "metric": "jvm.metrics.threadsBlocked", "color": "ff0000", "label": "Blocked", 
+      { "metric": "jvm.JvmMetrics.ThreadsBlocked", "color": "ff0000", "label": "Blocked", 
         "line_width": "2", "type": "stack" },
-      { "metric": "jvm.metrics.threadsWaiting", "color": "ff6600", "label": "Waiting",
+      { "metric": "jvm.JvmMetrics.ThreadsWaiting", "color": "ff6600", "label": "Waiting",
         "line_width": "2", "type": "stack" },
-      { "metric": "jvm.metrics.threadsTimedWaiting", "color": "ffff00", "label": "Timed Waiting",
+      { "metric": "jvm.JvmMetrics.ThreadsTimedWaiting", "color": "ffff00", "label": "Timed Waiting",
         "line_width": "2", "type": "stack" },
-      { "metric": "jvm.metrics.threadsRunnable", "color": "00ff00", "label": "Runnable", 
+      { "metric": "jvm.JvmMetrics.ThreadsRunnable", "color": "00ff00", "label": "Runnable", 
         "line_width": "2", "type": "stack" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json
index 928a20f..2885d4e 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_latency_report.json
@@ -4,9 +4,9 @@
    "title" : "Average RPC Latencies",
    "vertical_label" : "Seconds",
    "series" : [
-      { "metric": "rpc.rpc.RpcProcessingTime_avg_time", "color": "0000ff", "label": "Average Processing Time", 
+      { "metric": "rpc.rpc.RpcProcessingTimeAvgTime", "color": "0000ff", "label": "Average Processing Time", 
         "line_width": "2", "type": "stack" },
-      { "metric": "rpc.rpc.RpcQueueTime_avg_time", "color": "ff0000", "label": "Average Queue Time", 
+      { "metric": "rpc.rpc.RpcQueueTimeAvgTime", "color": "ff0000", "label": "Average Queue Time", 
         "line_width": "2", "type": "stack" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json
index d0038a5..269e967 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_avg_time_report.json
@@ -4,7 +4,7 @@
    "title" : "Average RPC Wait Time",
    "vertical_label" : "Seconds",
    "series" : [
-      { "metric": "rpc.rpc.RpcQueueTime_avg_time", "color": "ff0000", "label": "Avg RPC Wait Time", 
+      { "metric": "rpc.rpc.RpcQueueTimeAvgTime", "color": "ff0000", "label": "Avg RPC Wait Time", 
         "line_width": "2", "type": "line" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json
index 3a6f3fd..739a522 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpc_queue_time_num_ops_report.json
@@ -4,7 +4,7 @@
    "title" : "Average RPC Operations",
    "vertical_label" : "Operations/Sec",
    "series" : [
-      { "metric": "rpc.rpc.RpcQueueTime_num_ops", "color": "ff0000", "label": "Avg RPC Ops", 
+      { "metric": "rpc.rpc.RpcQueueTimeNumOps", "color": "ff0000", "label": "Avg RPC Ops", 
         "line_width": "2", "type": "line" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json
index a689884..bdd63b6 100644
--- a/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json
+++ b/mon_dashboard/src/addOns/ganglia/graph.d/hdp_mon_rpcdetailed_heartbeat_num_ops_report.json
@@ -4,7 +4,7 @@
    "title" : "Heartbeats",
    "vertical_label" : "Heartbeats/Sec",
    "series" : [
-      { "metric": "rpcdetailed.rpcdetailed.sendHeartbeat_num_ops", "color": "ff0000", "label": "Heartbeats/Sec", 
+      { "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps", "color": "ff0000", "label": "Heartbeats/Sec", 
         "line_width": "2", "type": "line" }
    ]
 }
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_aggregate.php b/mon_dashboard/src/addOns/nagios/plugins/check_aggregate.php
index a82ec76..559c845 100755
--- a/mon_dashboard/src/addOns/nagios/plugins/check_aggregate.php
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_aggregate.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -114,6 +113,9 @@
       case "NAMENODE":
         $pieces[0] = "HDFS";
         break; 
+      case "RESOURCEMANAGER":
+        $pieces[0] = "YARN";
+        break; 
       case "JOBTRACKER":
         $pieces[0] = "MAPREDUCE";
         break; 
@@ -123,6 +125,7 @@
       case "SYSTEM":
       case "HDFS":
       case "MAPREDUCE":
+      case "YARN":
       case "HBASE":
         break; 
       default:
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_blocks.php b/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_blocks.php
index 1423df1..c20d406 100755
--- a/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_blocks.php
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_blocks.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_capacity.php b/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_capacity.php
index 43e371a..a2686c5 100755
--- a/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_capacity.php
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_hdfs_capacity.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_name_dir_status.php b/mon_dashboard/src/addOns/nagios/plugins/check_name_dir_status.php
index 61356a3..d2bf843 100644
--- a/mon_dashboard/src/addOns/nagios/plugins/check_name_dir_status.php
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_name_dir_status.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_oozie_status.sh b/mon_dashboard/src/addOns/nagios/plugins/check_oozie_status.sh
index 70b73c5..f22da8b 100755
--- a/mon_dashboard/src/addOns/nagios/plugins/check_oozie_status.sh
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_oozie_status.sh
@@ -20,6 +20,7 @@
 #
 #
 # OOZIE_URL is of the form http://<hostname>:<port>/oozie
+# OOZIE_URL: http://hortonworks-sandbox.localdomain:11000/oozie
 HOST=$1
 PORT=$2
 JAVA_HOME=$3
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_rpcq_latency.php b/mon_dashboard/src/addOns/nagios/plugins/check_rpcq_latency.php
index a150256..3517f85 100755
--- a/mon_dashboard/src/addOns/nagios/plugins/check_rpcq_latency.php
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_rpcq_latency.php
@@ -1,4 +1,3 @@
-#!/usr/bin/php
 <?php
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -22,7 +21,7 @@
  * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
  * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
  * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode
+ * Service Name = JobTracker, ResourceManager, NameNode
  */
 
   $options = getopt ("h:p:w:c:n:");
@@ -63,6 +62,6 @@
 
   /* print usage */
   function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/ResourceManager/NameNode> -w <warn_in_sec> -c <crit_in_sec>\n";
   }
 ?>
diff --git a/mon_dashboard/src/addOns/nagios/plugins/check_webui.sh b/mon_dashboard/src/addOns/nagios/plugins/check_webui.sh
index 0f9634f..a7228db 100644
--- a/mon_dashboard/src/addOns/nagios/plugins/check_webui.sh
+++ b/mon_dashboard/src/addOns/nagios/plugins/check_webui.sh
@@ -42,6 +42,14 @@
       exit 1;
     fi
     ;;
+# This file is shared between hdp1 and hdp2, so the YARN processes are handled here as well.
+resourcemanager) 
+    rmweburl="http://$host:8088"
+    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
+      echo "WARNING: ResourceManager web UI not accessible : $rmweburl";
+      exit 1;
+    fi
+    ;;
 namenode)
     nnweburl="http://$host:50070"
     if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
@@ -56,6 +64,14 @@
       exit 1;
     fi
     ;;
+# For HDP2
+jobhistoryserver2)
+    jhs2weburl="http://$host:19888/"
+    if [[ `checkurl "$jhs2weburl"` -ne 0 ]]; then 
+      echo "WARNING: JobhistoryServer web UI not accessible : $jhs2weburl";
+      exit 1;
+    fi
+    ;;
 hbase)
     hbaseweburl="http://$host:60010/master-status"
     jhweburl="http://domU-12-31-39-16-DC-FB.compute-1.internal:51111/jobhistoryhome.jsp"
@@ -64,7 +80,7 @@
       exit 1;
     fi
     ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode]"
+*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|resourcemanager|jobhistory|jobhistoryserver2|hbase|namenode]"
    exit 3
    ;;
 esac
diff --git a/mon_dashboard/src/addOns/nagios/scripts/nagios_alerts.php b/mon_dashboard/src/addOns/nagios/scripts/nagios_alerts.php
index 53f9204..62662ce 100644
--- a/mon_dashboard/src/addOns/nagios/scripts/nagios_alerts.php
+++ b/mon_dashboard/src/addOns/nagios/scripts/nagios_alerts.php
@@ -93,13 +93,21 @@
 
   define ("HDFS_SERVICE_CHECK", "NAMENODE::Namenode Process down");
   define ("MAPREDUCE_SERVICE_CHECK", "JOBTRACKER::Jobtracker Process down");
+  define ("YARN_SERVICE_CHECK", "RESOURCEMANAGER::ResourceManager Process down");
   define ("HBASE_SERVICE_CHECK", "HBASEMASTER::HBaseMaster Process down");
   define ("ZOOKEEPER_SERVICE_CHECK", "ZOOKEEPER::Percent zookeeper servers down");
   define ("HIVE_METASTORE_SERVICE_CHECK", "HIVE-METASTORE::HIVE-METASTORE status check");
   define ("OOZIE_SERVICE_CHECK", "OOZIE::Oozie status check");
   define ("TEMPLETON_SERVICE_CHECK", "TEMPLETON::Templeton status check");
+  define ("PUPPET_SERVICE_CHECK", "PUPPET::Puppet agent down");
 
-  $status_file="/var/nagios/status.dat";
+  /* If SUSE, status file is under /var/lib/nagios */
+  if (file_exists("/etc/SuSE-release")) {
+    $status_file="/var/lib/nagios/status.dat";
+  } else {
+    $status_file="/var/nagios/status.dat";
+  }
+
   $q1="";
   if (array_key_exists('q1', $_GET)) {
     $q1=$_GET["q1"];
@@ -152,6 +160,7 @@
   function query_service_states ($status_file_content) {
     $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
     $services_object = array ();
+    $services_object["PUPPET"] = 0;
     foreach ($matches[0] as $object) {
       if (getParameter($object, "service_description") == HDFS_SERVICE_CHECK) {
         $services_object["HDFS"] = getParameter($object, "last_hard_state");
@@ -205,6 +214,16 @@
         }
         continue;
       }
+      if (getParameter($object, "service_description") == PUPPET_SERVICE_CHECK) {
+        $state = getParameter($object, "last_hard_state");
+        if ($state >= 1) {
+          $services_object["PUPPET"]++;
+        }
+        continue;
+      }
+    }
+    if ($services_object["PUPPET"] >= 1) {
+      $services_object["PUPPET"] = 1;
     }
     return $services_object;
   }
@@ -347,6 +366,9 @@
       case "NAMENODE":
         $pieces[0] = "HDFS";
         break;
+      case "RESOURCEMANAGER":
+        $pieces[0] = "YARN";
+        break;
       case "JOBTRACKER":
         $pieces[0] = "MAPREDUCE";
         break;
@@ -360,6 +382,7 @@
       case "HIVE-METASTORE":
       case "OOZIE":
       case "TEMPLETON":
+      case "PUPPET":
         break;
       default:
         $pieces[0] = "UNKNOWN";
diff --git a/mon_dashboard/src/dataServices/ganglia/graph_info/dashboard/custom/hdp.json b/mon_dashboard/src/dataServices/ganglia/graph_info/dashboard/custom/hdp.json
index 6a98991..81bece5 100644
--- a/mon_dashboard/src/dataServices/ganglia/graph_info/dashboard/custom/hdp.json
+++ b/mon_dashboard/src/dataServices/ganglia/graph_info/dashboard/custom/hdp.json
@@ -1,28 +1,49 @@
 {
     "Global": [
-        {
-            "description": "Bytes written to and read from HDFS, aggregated across all the DataNodes", 
-            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%", 
-            "title": "HDFS I/O", 
+        {   
+            "description": "Bytes written to and read from HDFS, aggregated across all the DataNodes",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%HDFSSlavesClusterName%",
+            "title": "HDFS I/O",
             "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%HDFSSlavesClusterName%&g=hdp_mon_hdfs_io_report"
-        }, 
+        },
+        {   
+            "description": "HDFS Capacity Remaining",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%",
+            "title": "HDFS Capacity Remaining",
+            "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=hdp_mon_hdfs_capacity_remaining_report"
+        },
+        {   
+            "description": "HDFS operations per second",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%",
+            "title": "HDFS I/O",
+            "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=hdp_mon_hdfs_ops_report"
+        },
+        {   
+            "description": "HDFS under replicated blocks",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%NameNodeClusterName%",
+            "title": "HDFS Under-Replicated Blocks",
+            "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%NameNodeClusterName%&g=hdp_mon_hdfs_under_replicated_blocks_report"
+        }
+    ],
+    "CommentedOutEditAndMoveBackAsNecessary" : [
         {
-            "description": "Total Map slots and Utilized Map slots (occupied + reserved)", 
-            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%", 
-            "title": "Map Slot Utilization", 
+            "description": "Total Map slots and Utilized Map slots (occupied + reserved)",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%",
+            "title": "Map Slot Utilization",
             "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_map_slot_report"
-        }, 
-        {
-            "description": "Total Reduce slots and Utilized Reduce slots (occupied + reserved)", 
-            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%", 
-            "title": "Reduce Slot Utilization", 
+        },
+        {   
+            "description": "Total Reduce slots and Utilized Reduce slots (occupied + reserved)",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%",
+            "title": "Reduce Slot Utilization",
             "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_reduce_slot_report"
-        }, 
-        {
-            "description": "Map and Reduce tasks waiting to be scheduled (indicates load on the Map/Reduce cluster)", 
-            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%", 
-            "title": "Waiting Map/Reduce Tasks", 
+        },
+        {   
+            "description": "Map and Reduce tasks waiting to be scheduled (indicates load on the Map/Reduce cluster)",
+            "link": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/?c=%JobTrackerClusterName%",
+            "title": "Waiting Map/Reduce Tasks",
             "url": "http://%GangliaWebHostName%:%GangliaWebPort%/ganglia/graph.php?c=%JobTrackerClusterName%&g=hdp_mon_jobtracker_mapreduce_report"
         }
+
     ]
 }
diff --git a/mon_dashboard/src/ui/hbase.html b/mon_dashboard/src/ui/hbase.html
index 3ed56b1..298ef4a 100644
--- a/mon_dashboard/src/ui/hbase.html
+++ b/mon_dashboard/src/ui/hbase.html
@@ -25,13 +25,13 @@
 	<link type="text/css" rel="stylesheet" href="./libs/jqgrid/css/ui.jqgrid.css" media="screen"/>
 	<link type="text/css" rel="stylesheet" href="./libs/jquery.multiselect.css" media="screen"/>
 	<!--[if lt IE 9]><script type="text/javascript" src="./libs/html5shiv/html5shiv.min.js"></script><![endif]-->
-	<link rel="shortcut icon" href="/hmc/images/logo-micro.gif"/>
+	<link rel="shortcut icon" href="./logo/logo-micro.gif"/>
 </head>
 <body>
   <div class="navbar">
     <div class="navbar-inner">
       <div class="container">
-        <a href="/hmc/html"><img id="logo" src="/hmc/images/logo-small.gif"></a>
+        <a href="/hmc/html"><img id="logo" src="./logo/logo-small.gif"></a>
         <a id="brand" class="brand" href="/hmc/html"></a>
         <ul class="nav">
           <li><a href="/hmc/html">Cluster Management</a></li>
@@ -49,17 +49,19 @@
 			<ul>
 				<li><a href="./home.html">Dashboard</a></li>
 				<li><a href="./hdfs.html">HDFS</a></li>
+                                <!--
 				<li><a href="./mapreduce.html">MapReduce</a></li>
+                                -->
 				<li><a id="hbase_link" href="./hbase.html" class="selectedNavItem">HBase</a></li>
 			</ul>
 		</nav>
 		
 		<section id="links">
-			<a class="roundedBox" href="" target="_blank">Post Feedback</a>
-			<a class="roundedBox" href="" target="_blank">Documentation</a>
-			<a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
-			<a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
-                        <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
+      <a class="roundedBox" href="http://hortonworks.com/community/forums/" target="_blank">Post Feedback</a>
+      <a id="docsLink" class="roundedBox" href="javascript:void(0)" target="_blank">Documentation</a>
+      <a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
+      <a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
+      <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
 		</section>
 		<section class="clear"></section>
 	</section>
diff --git a/mon_dashboard/src/ui/hdfs.html b/mon_dashboard/src/ui/hdfs.html
index f6fd03e..cbe9bb0 100644
--- a/mon_dashboard/src/ui/hdfs.html
+++ b/mon_dashboard/src/ui/hdfs.html
@@ -25,13 +25,13 @@
 	<link type="text/css" rel="stylesheet" href="./libs/jqgrid/css/ui.jqgrid.css" media="screen"/>
 	<link type="text/css" rel="stylesheet" href="./libs/jquery.multiselect.css" media="screen"/>
 	<!--[if lt IE 9]><script type="text/javascript" src="./libs/html5shiv/html5shiv.min.js"></script><![endif]-->
-	<link rel="shortcut icon" href="/hmc/images/logo-micro.gif"/>
+	<link rel="shortcut icon" href="./logo/logo-micro.gif"/>
 </head>
 <body>
   <div class="navbar">
     <div class="navbar-inner">
       <div class="container">
-        <a href="/hmc/html"><img id="logo" src="/hmc/images/logo-small.gif"></a>
+        <a href="/hmc/html"><img id="logo" src="./logo/logo-small.gif"></a>
         <a id="brand" class="brand" href="/hmc/html"></a>				
         <ul class="nav">
           <li><a href="/hmc/html">Cluster Management</a></li>
@@ -49,17 +49,19 @@
 			<ul>
 				<li><a href="./home.html">Dashboard</a></li>
 				<li><a href="./hdfs.html" class="selectedNavItem">HDFS</a></li>
+                                <!--
 				<li><a href="./mapreduce.html">MapReduce</a></li>
+                                -->
 				<li><a id="hbase_link" href="./hbase.html">HBase</a></li>
 			</ul>
 		</nav>
 		
 		<section id="links">
-			<a class="roundedBox" href="" target="_blank">Post Feedback</a>
-			<a class="roundedBox" href="" target="_blank">Documentation</a>
-			<a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
-			<a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
-                        <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
+      <a class="roundedBox" href="http://hortonworks.com/community/forums/" target="_blank">Post Feedback</a>
+      <a id="docsLink" class="roundedBox" href="javascript:void(0)" target="_blank">Documentation</a>
+      <a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
+      <a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
+      <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
 		</section>
 		<section class="clear"></section>
 	</section>
diff --git a/mon_dashboard/src/ui/home.html b/mon_dashboard/src/ui/home.html
index aa33eb5..88a45f2 100644
--- a/mon_dashboard/src/ui/home.html
+++ b/mon_dashboard/src/ui/home.html
@@ -25,14 +25,14 @@
   <link type="text/css" rel="stylesheet" href="./libs/jqgrid/css/ui.jqgrid.css" media="screen"/>
   <link type="text/css" rel="stylesheet" href="./libs/jquery.multiselect.css" media="screen"/>
   <!--[if lt IE 9]><script type="text/javascript" src="./libs/html5shiv/html5shiv.min.js"></script><![endif]-->
-  <link rel="shortcut icon" href="/hmc/images/logo-micro.gif"/>
+  <link rel="shortcut icon" href="./logo/logo-micro.gif"/>
 </head>
 <body>
   <div class="navbar">
     <div class="navbar-inner">
       <div class="container">
-        <a href="/hmc/html"><img id="logo" src="/hmc/images/logo-small.gif"></a>
-        <a id="brand" class="brand" href="/hmc/html">Ambari</a>
+        <a href="/hmc/html"><img id="logo" src="./logo/logo-small.gif"></a>
+        <a id="brand" class="brand" href="/hmc/html"></a>
         <ul class="nav">
           <li><a href="/hmc/html">Cluster Management</a></li>
           <li class="active"><a href="/hdp/dashboard/ui/home.html">Monitoring</a></li>
@@ -48,14 +48,16 @@
       <ul>
         <li><a href="./home.html" class="selectedNavItem">Dashboard</a></li>
         <li><a href="./hdfs.html">HDFS</a></li>
+        <!--
         <li><a href="./mapreduce.html">MapReduce</a></li>
+        -->
         <li><a id="hbase_link" href="./hbase.html">HBase</a></li>
       </ul>
     </nav>
     
     <section id="links">
-      <a class="roundedBox" href="" target="_blank">Post Feedback</a>
-      <a class="roundedBox" href="" target="_blank">Documentation</a>
+      <a class="roundedBox" href="http://hortonworks.com/community/forums/" target="_blank">Post Feedback</a>
+      <a id="docsLink" class="roundedBox" href="javascript:void(0)" target="_blank">Documentation</a>
       <a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
       <a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
       <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
@@ -107,6 +109,8 @@
         </tbody>
       </table>
       <br/>
+      <!-- TODO: Fix this to change to YARN -->
+      <div style="visibility:hidden">
       <table id="clusterSummaryGrid2" class="clusterSummaryGrid" class="grid">
         <thead>
           <tr><th id="mapredsummarytitle" colspan="2">MapReduce</th></tr>
@@ -117,6 +121,7 @@
           <tr><td>Running & Waiting Jobs</td><td></td></tr>
         </tbody>
       </table>
+      </div>
       <br/>
       <table id="clusterSummaryGrid3" class="clusterSummaryGrid" class="grid">
         <thead>
@@ -163,15 +168,15 @@
   </section>
   
   <footer id="footer"></footer>
-</container>
-  <script type="text/javascript" src="./libs/jquery-1.7.1.js"></script>
-  <script type="text/javascript" src="./libs/jqueryui/js/jquery-ui-1.7.3.custom.min.js"></script>
-  <script type="text/javascript" src="./libs/jquery.multiselect.js"></script>
-  <script type="text/javascript" src="./libs/jqgrid/js/i18n/grid.locale-en.js"></script>
-  <script type="text/javascript" src="./libs/jqgrid/js/jquery.jqGrid.src.js"></script>
-  <script type="text/javascript" src="./libs/raphael/raphael.js"></script>
-  <script type="text/javascript" src="./libs/raphael/g.raphael.min.js"></script>
-  <script type="text/javascript" src="./libs/raphael/g.pie.min.js"></script>
-  <script type="text/javascript" src="./js/common.js"></script>  
+</div>
+<script type="text/javascript" src="./libs/jquery-1.7.1.js"></script>
+<script type="text/javascript" src="./libs/jqueryui/js/jquery-ui-1.7.3.custom.min.js"></script>
+<script type="text/javascript" src="./libs/jquery.multiselect.js"></script>
+<script type="text/javascript" src="./libs/jqgrid/js/i18n/grid.locale-en.js"></script>
+<script type="text/javascript" src="./libs/jqgrid/js/jquery.jqGrid.src.js"></script>
+<script type="text/javascript" src="./libs/raphael/raphael.js"></script>
+<script type="text/javascript" src="./libs/raphael/g.raphael.min.js"></script>
+<script type="text/javascript" src="./libs/raphael/g.pie.min.js"></script>
+<script type="text/javascript" src="./js/common.js"></script>
 </body>
 </html>
diff --git a/mon_dashboard/src/ui/js/common.js b/mon_dashboard/src/ui/js/common.js
index 74582b0..772646f 100644
--- a/mon_dashboard/src/ui/js/common.js
+++ b/mon_dashboard/src/ui/js/common.js
@@ -29,13 +29,20 @@
     alerts, hostcounts, hbase_installed, hbase_link, hbase, 
     firstService, graphCounter = 0, errorFlag = false, errors = [], auto_refresh = false,
     gangliaErrorCount = 0;
+
+  var APP_NAME = 'Hortonworks Management Center';
+  var FOOTER_TEXT = 'Hortonworks &copy; 2012<br><a href="http://www.apache.org/licenses/LICENSE-2.0" target="_blank">Licensed under the Apache License, Version 2.0</a>.<br><a href="/hmc/licenses/NOTICE.txt" target="_blank">See third-party tools/resources that Ambari uses and their respective authors</a>';
+  var TROUBLESHOOT_URL = 'http://docs.hortonworks.com/CURRENT/index.htm#Monitoring_HDP/Using_Nagios_With_Hadoop/Nagios_Alerts_For_Hadoop_Services.htm';
+  var DOCS_URL = 'http://docs.hortonworks.com/CURRENT/index.htm#Monitoring_HDP/Using_HDP_Monitoring_Dashboard/Using_HDP_Monitoring_Dashboard.htm';
+  var HELP_URL = 'http://docs.hortonworks.com/CURRENT/index.htm#Deploying_Hortonworks_Data_Platform/Using_HMC/Troubleshooting/Troubleshooting_HMC_Deployments.htm';
   
   // on document ready
   $(document).ready(function(){
-    document.title = 'Ambari';
-    $('#brand').html('Ambari');
-    $('#footer').html('<a href="http://www.apache.org/licenses/LICENSE-2.0" target="_blank">Licensed under the Apache License, Version 2.0</a>.<br><a href="/hmc/licenses/NOTICE.txt" target="_blank">See third-party tools/resources that Ambari uses and their respective authors</a>');
-    $('.navbar a.help').attr('href', 'http://incubator.apache.org/ambari/install.html');
+    document.title = APP_NAME;
+    $('#brand').html(APP_NAME);
+    $('#footer').html(FOOTER_TEXT);
+    $('.navbar a.help').attr('href', HELP_URL);
+    $('#docsLink').attr('href', DOCS_URL);
     self.clearTimeout(to);
     a.refreshPage();
     a.setEventDelegation();
@@ -92,7 +99,7 @@
       context = "dashboard";
       collection = "hdp";
       collection2 = "all";
-      alertParam = "nok";
+      alertParam = "all";
     } else if (page == "HDFS"){
       context = "hdfs";
       collection = "hdp";
@@ -513,7 +520,7 @@
       a.hideGraphPopup();
     } else if(target.parentNode){
       var targetParentId = target.parentNode.id;
-      if (targetParentId == "HDFS" || targetParentId == "MAPREDUCE" || targetParentId == "HBASE" || targetParentId == "ZOOKEEPER" || targetParentId == "HIVE-METASTORE" || targetParentId == "OOZIE" || targetParentId == "TEMPLETON") {
+      if (targetParentId == "HDFS" || targetParentId == "MAPREDUCE" || targetParentId == "HBASE" || targetParentId == "ZOOKEEPER" || targetParentId == "HIVE-METASTORE" || targetParentId == "OOZIE" || targetParentId == "TEMPLETON" || targetParentId == "PUPPET") {
         a.showAlerts(target);
       }
     }
@@ -685,7 +692,7 @@
       var hdfsCritCount = 0, hdfsWarnCount = 0, mrCritCount = 0, mrWarnCount = 0, 
       hbaseCritCount = 0, hbaseWarnCount = 0, zkCritCount = 0, zkWarnCount = 0, 
       hcatCritCount = 0, hcatWarnCount = 0, oozieWarnCount = 0, oozieCritCount = 0,
-                        templetonWarnCount = 0, templetonCritCount = 0;
+      templetonWarnCount = 0, templetonCritCount = 0, puppetWarnCount = 0, puppetCritCount = 0;
       
       //Set firstService in the table
       var servicestates = response.servicestates;
@@ -743,11 +750,14 @@
               converted.last_hard_state_change = a.convertToDDHHMM(actualTime - alerts[i].last_hard_state_change);
               
               // Plugin Output
-              converted.plugin_output = alerts[i].plugin_output;
-              
+              if (alerts[i].service_type === 'PUPPET' && alerts[i].plugin_output.toLowerCase().indexOf('connection refused') >= 0) {
+                converted.plugin_output = 'Puppet agent down on ' + alerts[i].host_name;
+              } else {
+                converted.plugin_output = alerts[i].plugin_output;
+              }
               filtered.push(converted);
             }
-            
+
             // Step 2 out of 3: Increment Counters for Alert Summary Table
             if (alerts[i].last_hard_state == 1) {
               if(alerts[i].service_type == "HDFS"){
@@ -764,6 +774,8 @@
                 oozieWarnCount++;
               } else if(alerts[i].service_type == "TEMPLETON"){
                 templetonWarnCount++;
+              } else if(alerts[i].service_type == "PUPPET"){
+                puppetWarnCount++;
               }
             } else if (alerts[i].last_hard_state == 2) {
               if(alerts[i].service_type == "HDFS"){
@@ -780,6 +792,8 @@
                 oozieCritCount++;
               } else if(alerts[i].service_type == "TEMPLETON"){
                 templetonCritCount++;
+              } else if(alerts[i].service_type == "PUPPET"){
+                puppetCritCount++;
               }
             }
             
@@ -821,9 +835,9 @@
 
           // Refresh the caption to indicate service name.
           if(page == "HDFS" || page == "MAPREDUCE" || page == "HBASE"){
-            $("#alertsGrid").jqGrid('setCaption',"Configured Alerts (<a href=\"\" target=\"\"></a>)");
+            $("#alertsGrid").jqGrid('setCaption', 'Configured Alerts (<a href="' + TROUBLESHOOT_URL + '" target="_blank">troubleshoot?</a>)');
           } else {
-            $("#alertsGrid").jqGrid('setCaption',targetId+" Alerts (<a href=\"\" target=\"\"></a>)");
+            $("#alertsGrid").jqGrid('setCaption', targetId + ' Alerts (<a href="' + TROUBLESHOOT_URL + '" target="_blank">troubleshoot?</a>)');
           }
           
           // Populate Alerts Grid
@@ -932,6 +946,13 @@
               } else {
                 criticalAlerts.className = "critical";
               }
+            } else if(key == "PUPPET"){
+              criticalAlerts.innerHTML = puppetCritCount;
+              if(puppetCritCount > 0) {
+                criticalAlerts.className = "highlighted-red critical";
+              } else {
+                criticalAlerts.className = "critical";
+              }
             }
             tr.appendChild(criticalAlerts);
             
@@ -986,6 +1007,13 @@
               } else {
                 warnAlerts.className = "warning";
               }
+            } else if(key == "PUPPET"){
+              warnAlerts.innerHTML = puppetWarnCount;
+              if(puppetWarnCount > 0) {
+                warnAlerts.className = "highlighted-orange warning";
+              } else {
+                warnAlerts.className = "warning";
+              }
             }
             tr.appendChild(warnAlerts);
             servicesGrid.append(tr);
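
The common.js hunks above fold a new PUPPET service type into the dashboard's alert handling: Puppet checks whose plugin output contains "connection refused" are rewritten to a friendlier "Puppet agent down on <host>" message, and PUPPET warning/critical counts are tallied alongside the existing services. A minimal Python sketch of that rule follows (the dashboard code itself is JavaScript; the field names service_type, plugin_output, host_name and last_hard_state are taken from the diff, and the sample record is illustrative only):

# Sketch only: the real logic lives in the JavaScript hunks above.

def normalize_plugin_output(alert):
    """Rewrite Puppet 'connection refused' failures as an agent-down message."""
    if (alert['service_type'] == 'PUPPET'
            and 'connection refused' in alert['plugin_output'].lower()):
        return 'Puppet agent down on ' + alert['host_name']
    return alert['plugin_output']

def tally_alerts(alerts):
    """Count warnings (last_hard_state == 1) and criticals (== 2) per service."""
    counts = {}
    for alert in alerts:
        warn, crit = counts.get(alert['service_type'], (0, 0))
        if alert['last_hard_state'] == 1:
            warn += 1
        elif alert['last_hard_state'] == 2:
            crit += 1
        counts[alert['service_type']] = (warn, crit)
    return counts

sample = {'service_type': 'PUPPET', 'host_name': 'node1.example.com',
          'plugin_output': 'CRITICAL: Connection refused', 'last_hard_state': 2}
print normalize_plugin_output(sample)   # Puppet agent down on node1.example.com
print tally_alerts([sample])            # {'PUPPET': (0, 1)}
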
diff --git a/mon_dashboard/src/ui/mapreduce.html b/mon_dashboard/src/ui/mapreduce.html
index c54dce0..da39408 100644
--- a/mon_dashboard/src/ui/mapreduce.html
+++ b/mon_dashboard/src/ui/mapreduce.html
@@ -25,13 +25,13 @@
 	<link type="text/css" rel="stylesheet" href="./libs/jqgrid/css/ui.jqgrid.css" media="screen"/>
 	<link type="text/css" rel="stylesheet" href="./libs/jquery.multiselect.css" media="screen"/>
 	<!--[if lt IE 9]><script type="text/javascript" src="./libs/html5shiv/html5shiv.min.js"></script><![endif]-->
-	<link rel="shortcut icon" href="/hmc/images/logo-micro.gif"/>
+	<link rel="shortcut icon" href="./logo/logo-micro.gif"/>
 </head>
 <body>
   <div class="navbar">
     <div class="navbar-inner">
       <div class="container">
-        <a href="/hmc/html"><img id="logo" src="/hmc/images/logo-small.gif"></a>
+        <a href="/hmc/html"><img id="logo" src="./logo/logo-small.gif"></a>
         <a id="brand" class="brand" href="/hmc/html"></a>
         <ul class="nav">
           <li><a href="/hmc/html">Cluster Management</a></li>
@@ -55,11 +55,11 @@
 		</nav>
 		
 		<section id="links">
-			<a class="roundedBox" href="" target="_blank">Post Feedback</a>
-			<a class="roundedBox" href="" target="_blank">Documentation</a>
-			<a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
-			<a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
-                        <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
+      <a class="roundedBox" href="http://hortonworks.com/community/forums/" target="_blank">Post Feedback</a>
+      <a id="docsLink" class="roundedBox" href="javascript:void(0)" target="_blank">Documentation</a>
+      <a id="nagios" class="roundedBox" href="" target="_blank">Nagios</a>
+      <a id="ganglia" class="roundedBox" href="" target="_blank">Ganglia</a>
+      <a id="oozie" class="hiddenLink" href="" target="_blank">Oozie</a>
 		</section>
 		<section class="clear"></section>
 	</section>
diff --git a/mon_dashboard/test/dataServices/jmx/data/sample_hbasemaster_jmx.json b/mon_dashboard/test/dataServices/jmx/data/sample_hbasemaster_jmx.json
index 7ba9a38..08e1472 100644
--- a/mon_dashboard/test/dataServices/jmx/data/sample_hbasemaster_jmx.json
+++ b/mon_dashboard/test/dataServices/jmx/data/sample_hbasemaster_jmx.json
@@ -826,7 +826,7 @@
     "name" : "hadoop:service=HBase,name=Info",
     "modelerType" : "org.apache.hadoop.hbase.metrics.HBaseInfo$HBaseInfoMBean",
     "revision" : "a23f8636efd6dd9d37f3a15d83f2396819509502",
-    "hdfsUser" : "foo",
+    "hdfsUser" : "hortonfo",
     "hdfsDate" : "Fri Dec 16 20:01:27 UTC 2011",
     "hdfsUrl" : "https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0",
     "date" : "Tue Feb 14 10:12:33 PST 2012",
diff --git a/mon_dashboard/test/nagios/plugins/test_sys_logger.py b/mon_dashboard/test/nagios/plugins/test_sys_logger.py
new file mode 100644
index 0000000..7e523f8
--- /dev/null
+++ b/mon_dashboard/test/nagios/plugins/test_sys_logger.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+
+import sys
+sys.path.append('../src')
+
+import sys_logger
+
+tests_passed = 0
+tests_failed = 0
+def test_log_tvi_msg(msg):
+    global tests_passed, tests_failed
+    if msg == expected_log_msg:
+        print 'Test Passed'
+        tests_passed += 1
+    else:
+        print '*** TEST FAILED ***'
+        print 'Expected MSG: {0}'.format(expected_log_msg)
+        print 'Actual MSG  : {0}'.format(msg)
+        tests_failed += 1
+
+sys_logger.log_tvi_msg = test_log_tvi_msg
+
+def test(tvi_rule, expected_msg, arg1, arg2, arg3, arg4, arg5):
+    sys.stdout.write(tvi_rule + ': ')
+    global expected_log_msg
+    expected_log_msg = expected_msg
+    sys_logger.generate_tvi_log_msg(arg1, arg2, arg3, arg4, arg5)
+
+def summary():
+    total_tests = tests_passed + tests_failed
+    print '\nTests Run: {0}'.format(total_tests)
+    print 'Passed: {0}, Failed: {1}'.format(tests_passed, tests_failed)
+    if not tests_failed:
+        print 'SUCCESS! All tests pass.'
+
+
+# Hadoop_Host_Down
+test('Hadoop_Host_Down',
+     'Critical: Hadoop: host_down# Event Host=MY_HOST(CRITICAL), PING FAILED - Packet loss = 100%, RTA = 0.00 ms',
+     'HARD', '1', 'CRITICAL', 'Host::Ping', 'Event Host=MY_HOST(CRITICAL), PING FAILED - Packet loss = 100%, RTA = 0.00 ms')
+
+test('Hadoop_Host_Down:OK',
+    'OK: Hadoop: host_down_ok# Event Host=MY_HOST(OK), PING SUCCESS - Packet loss = 0%, RTA = 1.00 ms',
+    'HARD', '1', 'OK', 'Host::Ping', 'Event Host=MY_HOST(OK), PING SUCCESS - Packet loss = 0%, RTA = 1.00 ms')
+
+# Hadoop_Master_Daemon_CPU_Utilization
+test('Hadoop_Master_Daemon_CPU_Utilization',
+     'Critical: Hadoop: master_cpu_utilization# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%',
+     'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBaseMaster CPU utilization',
+     'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%')
+
+test('Hadoop_Master_Daemon_CPU_Utilization:Degraded',
+    'Degraded: Hadoop: master_cpu_utilization# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%',
+    'HARD', '1', 'WARNING', 'HBASEMASTER::HBaseMaster CPU utilization',
+    'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(CRITICAL), 4 CPU, average load 2.5%  200%')
+
+test('Hadoop_Master_Daemon_CPU_Utilization:OK',
+    'OK: Hadoop: master_cpu_utilization_ok# Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(OK), 4 CPU, average load 2.5%  200%',
+    'HARD', '1', 'OK', 'HBASEMASTER::HBaseMaster CPU utilization',
+    'Event Host=MY_HOST Service Description=HBASEMASTER::HBaseMaster CPU utilization(OK), 4 CPU, average load 2.5%  200%')
+
+# Hadoop_HDFS_Percent_Capacity
+test('Hadoop_HDFS_Percent_Capacity',
+     'Critical: Hadoop: hdfs_percent_capacity# Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(CRITICAL),DFSUsedGB:0.1, DFSTotalGB:1568.7',
+     'HARD', '1', 'CRITICAL', 'HDFS::HDFS Capacity utilization',
+     'Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(CRITICAL),DFSUsedGB:0.1, DFSTotalGB:1568.7')
+
+test('Hadoop_HDFS_Percent_Capacity:OK',
+    'OK: Hadoop: hdfs_percent_capacity_ok# Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(OK),DFSUsedGB:0.1, DFSTotalGB:1568.7',
+    'HARD', '1', 'OK', 'HDFS::HDFS Capacity utilization',
+    'Event Host=MY_HOST Service Description=HDFS::HDFS Capacity utilization(OK),DFSUsedGB:0.1, DFSTotalGB:1568.7')
+
+# Hadoop_HDFS_Corrupt_Missing_Blocks
+test('Hadoop_HDFS_Corrupt_Missing_Blocks',
+     'Critical: Hadoop: hdfs_block# Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(CRITICAL), corrupt_blocks:0, missing_blocks:0, total_blocks:147',
+     'HARD', '1', 'CRITICAL', 'HDFS::Corrupt/Missing blocks',
+     'Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(CRITICAL), corrupt_blocks:0, missing_blocks:0, total_blocks:147')
+
+test('Hadoop_HDFS_Corrupt_Missing_Blocks:OK',
+    'OK: Hadoop: hdfs_block_ok# Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(OK), corrupt_blocks:0, missing_blocks:0, total_blocks:147',
+    'HARD', '1', 'OK', 'HDFS::Corrupt/Missing blocks',
+    'Event Host=MY_HOST Service Description=HDFS::Corrupt/Missing blocks(OK), corrupt_blocks:0, missing_blocks:0, total_blocks:147')
+
+# Hadoop_NameNode_Edit_Log_Dir_Write
+test('Hadoop_NameNode_Edit_Log_Dir_Write',
+     'Critical: Hadoop: namenode_edit_log_write# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Edit logs directory status', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Edit_Log_Dir_Write:OK',
+    'OK: Hadoop: namenode_edit_log_write_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Namenode Edit logs directory status', 'SERVICE MSG')
+
+# Hadoop_DataNode_Down
+test('Hadoop_DataNode_Down',
+     'Critical: Hadoop: datanode_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HDFS::Percent DataNodes down','SERVICE MSG')
+
+test('Hadoop_DataNode_Down:OK',
+    'OK: Hadoop: datanode_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HDFS::Percent DataNodes down','SERVICE MSG')
+
+# Hadoop_DataNode_Process_Down
+test('Hadoop_DataNode_Process_Down',
+     'Critical: Hadoop: datanode_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'DATANODE::Process down', 'SERVICE MSG')
+
+test('Hadoop_DataNode_Process_Down:OK',
+    'OK: Hadoop: datanode_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'DATANODE::Process down', 'SERVICE MSG')
+
+# Hadoop_Percent_DataNodes_Storage_Full
+test('Hadoop_Percent_DataNodes_Storage_Full',
+     'Critical: Hadoop: datanodes_percent_storage_full# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HDFS::Percent DataNodes storage full', 'SERVICE MSG')
+
+test('Hadoop_Percent_DataNodes_Storage_Full:OK',
+    'OK: Hadoop: datanodes_percent_storage_full_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HDFS::Percent DataNodes storage full', 'SERVICE MSG')
+
+# Hadoop_NameNode_Process_Down
+test('Hadoop_NameNode_Process_Down:CRITICAL',
+     'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Process_Down:WARNING',
+    'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Process_Down:UNKNOWN',
+    'Fatal: Hadoop: namenode_process_down# SERVICE MSG',
+    'HARD', '1', 'UNKNOWN', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_Process_Down:OK',
+    'OK: Hadoop: namenode_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Namenode Process down', 'SERVICE MSG')
+
+# Hadoop_Secondary_NameNode_Process_Down
+test('Hadoop_Secondary_NameNode_Process_Down',
+    'Critical: Hadoop: secondary_namenode_process_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'NAMENODE::Secondary Namenode Process down', 'SERVICE MSG')
+
+test('Hadoop_Secondary_NameNode_Process_Down:OK',
+    'OK: Hadoop: secondary_namenode_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Secondary Namenode Process down', 'SERVICE MSG')
+
+# Hadoop_NameNode_RPC_Latency
+test('Hadoop_NameNode_RPC_Latency',
+     'Critical: Hadoop: namenode_rpc_latency# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_NameNode_RPC_Latency:Degraded',
+    'Degraded: Hadoop: namenode_rpc_latency# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_NameNode_RPC_Latency:OK',
+    'OK: Hadoop: namenode_rpc_latency_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HDFS::Namenode RPC Latency', 'SERVICE MSG')
+
+# Hadoop_DataNodes_Storage_Full
+test('Hadoop_DataNodes_Storage_Full',
+     'Critical: Hadoop: datanodes_storage_full# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'DATANODE::Storage full', 'SERVICE MSG')
+
+test('Hadoop_DataNodes_Storage_Full:OK',
+    'OK: Hadoop: datanodes_storage_full_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'DATANODE::Storage full', 'SERVICE MSG')
+
+# Hadoop_JobTracker_Process_Down
+test('Hadoop_JobTracker_Process_Down',
+     'Critical: Hadoop: jobtracker_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'JOBTRACKER::Jobtracker Process down', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_Process_Down:OK',
+    'OK: Hadoop: jobtracker_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::Jobtracker Process down', 'SERVICE MSG')
+
+# Hadoop_JobTracker_RPC_Latency
+test('Hadoop_JobTracker_RPC_Latency',
+     'Critical: Hadoop: jobtracker_rpc_latency# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_RPC_Latency:Degraded',
+    'Degraded: Hadoop: jobtracker_rpc_latency# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_RPC_Latency:OK',
+    'OK: Hadoop: jobtracker_rpc_latency_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'MAPREDUCE::JobTracker RPC Latency', 'SERVICE MSG')
+
+# Hadoop_JobTracker_CPU_Utilization
+test('Hadoop_JobTracker_CPU_Utilization',
+    'Critical: Hadoop: jobtracker_cpu_utilization# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_CPU_Utilization:Degraded',
+    'Degraded: Hadoop: jobtracker_cpu_utilization# SERVICE MSG',
+    'HARD', '1', 'WARNING', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_CPU_Utilization:OK',
+    'OK: Hadoop: jobtracker_cpu_utilization_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::Jobtracker CPU utilization', 'SERVICE MSG')
+
+# Hadoop_TaskTracker_Down
+test('Hadoop_TaskTracker_Down',
+     'Critical: Hadoop: tasktrackers_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'MAPREDUCE::Percent TaskTrackers down', 'SERVICE MSG')
+
+test('Hadoop_TaskTracker_Down:OK',
+    'OK: Hadoop: tasktrackers_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'MAPREDUCE::Percent TaskTrackers down', 'SERVICE MSG')
+
+# Hadoop_TaskTracker_Process_Down
+test('Hadoop_TaskTracker_Process_Down',
+     'Critical: Hadoop: tasktracker_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'TASKTRACKER::Process down', 'SERVICE MSG')
+
+test('Hadoop_TaskTracker_Process_Down:OK',
+    'OK: Hadoop: tasktracker_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'TASKTRACKER::Process down', 'SERVICE MSG')
+
+# Hadoop_HBaseMaster_Process_Down
+test('Hadoop_HBaseMaster_Process_Down',
+     'Critical: Hadoop: hbasemaster_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBaseMaster Process down', 'SERVICE MSG')
+
+test('Hadoop_HBaseMaster_Process_Down:OK',
+    'OK: Hadoop: hbasemaster_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HBASEMASTER::HBaseMaster Process down', 'SERVICE MSG')
+
+# Hadoop_RegionServer_Process_Down
+test('Hadoop_RegionServer_Process_Down',
+     'Critical: Hadoop: regionserver_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'REGIONSERVER::Process down', 'SERVICE MSG')
+
+test('Hadoop_RegionServer_Process_Down:OK',
+    'OK: Hadoop: regionserver_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'REGIONSERVER::Process down', 'SERVICE MSG')
+
+# Hadoop_RegionServer_Down
+test('Hadoop_RegionServer_Down',
+     'Critical: Hadoop: regionservers_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HBASE::Percent region servers down', 'SERVICE MSG')
+
+test('Hadoop_RegionServer_Down:OK',
+    'OK: Hadoop: regionservers_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HBASE::Percent region servers down', 'SERVICE MSG')
+
+# Hadoop_Hive_Metastore_Process_Down
+test('Hadoop_Hive_Metastore_Process_Down',
+     'Critical: Hadoop: hive_metastore_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'HIVE-METASTORE::HIVE-METASTORE status check', 'SERVICE MSG')
+
+test('Hadoop_Hive_Metastore_Process_Down:OK',
+    'OK: Hadoop: hive_metastore_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HIVE-METASTORE::HIVE-METASTORE status check', 'SERVICE MSG')
+
+# Hadoop_Zookeeper_Down
+test('Hadoop_Zookeeper_Down',
+     'Critical: Hadoop: zookeepers_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'ZOOKEEPER::Percent zookeeper servers down', 'SERVICE MSG')
+
+test('Hadoop_Zookeeper_Down:OK',
+    'OK: Hadoop: zookeepers_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'ZOOKEEPER::Percent zookeeper servers down', 'SERVICE MSG')
+
+# Hadoop_Zookeeper_Process_Down
+test('Hadoop_Zookeeper_Process_Down',
+     'Critical: Hadoop: zookeeper_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'ZKSERVERS::ZKSERVERS Process down', 'SERVICE MSG')
+
+test('Hadoop_Zookeeper_Process_Down:OK',
+    'OK: Hadoop: zookeeper_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'ZKSERVERS::ZKSERVERS Process down', 'SERVICE MSG')
+
+# Hadoop_Oozie_Down
+test('Hadoop_Oozie_Down',
+     'Critical: Hadoop: oozie_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'OOZIE::Oozie status check', 'SERVICE MSG')
+
+test('Hadoop_Oozie_Down:OK',
+    'OK: Hadoop: oozie_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'OOZIE::Oozie status check', 'SERVICE MSG')
+
+# Hadoop_Templeton_Down
+test('Hadoop_Templeton_Down',
+     'Critical: Hadoop: templeton_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'TEMPLETON::Templeton status check', 'SERVICE MSG')
+
+test('Hadoop_Templeton_Down:OK',
+    'OK: Hadoop: templeton_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'TEMPLETON::Templeton status check', 'SERVICE MSG')
+
+# Hadoop_Puppet_Down
+test('Hadoop_Puppet_Down',
+     'Critical: Hadoop: puppet_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'PUPPET::Puppet agent down', 'SERVICE MSG')
+
+test('Hadoop_Puppet_Down:OK',
+    'OK: Hadoop: puppet_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'PUPPET::Puppet agent down', 'SERVICE MSG')
+
+# Hadoop_Nagios_Status_Log_Stale
+test('Hadoop_Nagios_Status_Log_Stale',
+     'Critical: Hadoop: nagios_status_log_stale# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'NAGIOS::Nagios status log staleness', 'SERVICE MSG')
+
+test('Hadoop_Nagios_Status_Log_Stale:OK',
+    'OK: Hadoop: nagios_status_log_stale_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAGIOS::Nagios status log staleness', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Process_Down
+test('Hadoop_Ganglia_Process_Down',
+     'Critical: Hadoop: ganglia_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia [gmetad] Process down', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Process_Down:OK',
+    'OK: Hadoop: ganglia_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia [gmetad] Process down', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for namenode', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for namenode', 'SERVICE MSG')
+
+# Hadoop_Ganglia_Collector_Process_Down
+test('Hadoop_Ganglia_Collector_Process_Down',
+     'Critical: Hadoop: ganglia_collector_process_down# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'GANGLIA::Ganglia collector [gmond] Process down alert for slaves', 'SERVICE MSG')
+
+test('Hadoop_Ganglia_Collector_Process_Down:OK',
+    'OK: Hadoop: ganglia_collector_process_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'GANGLIA::Ganglia collector [gmond] Process down alert for slaves', 'SERVICE MSG')
+
+# Hadoop_UNKNOWN_MSG
+test('Hadoop_UNKNOWN_MSG',
+     'Critical: Hadoop: HADOOP_UNKNOWN_MSG# SERVICE MSG',
+     'HARD', '1', 'CRITICAL', 'ANY UNKNOWN SERVICE', 'SERVICE MSG')
+
+# HBase UI Down
+test('Hadoop_HBase_UI_Down',
+    'Critical: Hadoop: hbase_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'HBASEMASTER::HBase Web UI down', 'SERVICE MSG')
+
+test('Hadoop_HBase_UI_Down:OK',
+    'OK: Hadoop: hbase_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'HBASEMASTER::HBase Web UI down', 'SERVICE MSG')
+
+# Namenode UI Down
+test('Hadoop_NameNode_UI_Down',
+    'Critical: Hadoop: namenode_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'NAMENODE::Namenode Web UI down', 'SERVICE MSG')
+
+test('Hadoop_NameNode_UI_Down:OK',
+    'OK: Hadoop: namenode_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'NAMENODE::Namenode Web UI down', 'SERVICE MSG')
+
+# JobHistory UI Down
+test('Hadoop_JobHistory_UI_Down',
+    'Critical: Hadoop: jobhistory_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'JOBTRACKER::JobHistory Web UI down', 'SERVICE MSG')
+
+test('Hadoop_JobHistory_UI_Down:OK',
+    'OK: Hadoop: jobhistory_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::JobHistory Web UI down', 'SERVICE MSG')
+
+# JobTracker UI Down
+test('Hadoop_JobTracker_UI_Down',
+    'Critical: Hadoop: jobtracker_ui_down# SERVICE MSG',
+    'HARD', '1', 'CRITICAL', 'JOBTRACKER::JobTracker Web UI down', 'SERVICE MSG')
+
+test('Hadoop_JobTracker_UI_Down:OK',
+    'OK: Hadoop: jobtracker_ui_down_ok# SERVICE MSG',
+    'HARD', '1', 'OK', 'JOBTRACKER::JobTracker Web UI down', 'SERVICE MSG')
+
+summary()
+
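
test_sys_logger.py drives sys_logger.generate_tvi_log_msg by monkey-patching log_tvi_msg and comparing the captured string against an expected message; judging from the calls, the five positional arguments are the Nagios state type, attempt count, state, service description, and plugin output. sys_logger.py itself is not part of this patch, so the following is only a sketch of the contract the tests pin down (expected_tvi_msg is a hypothetical name): OK results get an _ok suffix on the rule, WARNING maps to Degraded, CRITICAL to Critical, a non-OK NameNode process-down check is always Fatal, and an unrecognized service description falls back to the literal rule HADOOP_UNKNOWN_MSG.

SEVERITY = {'OK': 'OK', 'WARNING': 'Degraded', 'CRITICAL': 'Critical'}

def expected_tvi_msg(state, service_desc, msg, rule):
    """Build the 'Severity: Hadoop: rule# message' shape the tests assert."""
    if service_desc == 'NAMENODE::Namenode Process down' and state != 'OK':
        # The tests expect Fatal for WARNING, UNKNOWN and CRITICAL alike here.
        return 'Fatal: Hadoop: {0}# {1}'.format(rule, msg)
    if state == 'OK':
        return 'OK: Hadoop: {0}_ok# {1}'.format(rule, msg)
    return '{0}: Hadoop: {1}# {2}'.format(SEVERITY.get(state, 'Critical'), rule, msg)

print expected_tvi_msg('WARNING', 'HDFS::Namenode RPC Latency', 'SERVICE MSG',
                       'namenode_rpc_latency')
# Degraded: Hadoop: namenode_rpc_latency# SERVICE MSG

Running python test_sys_logger.py from mon_dashboard/test/nagios/plugins (the script appends ../src to sys.path to import sys_logger) prints one Passed/FAILED line per rule followed by the summary() totals.
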
diff --git a/rat-excludes.txt b/rat-excludes.txt
index a0f9fb3..2b8f026 100644
--- a/rat-excludes.txt
+++ b/rat-excludes.txt
@@ -20,396 +20,3 @@
 [^ ]*/*hadoop-servicegroups.cfg.erb
 [^ ]*/*hadoop-hostgroups.cfg.erb
 [^ ]*/*regionservers.erb
-[^ ]*/*cluster_configuration.json.erb
-[^ ]*/*slaves.erb
-[^ ]*/*configuration.xsl.erb
-[^ ]*/*jquery-1.5.2.min.js
-[^ ]*/*jquery-1.7.1.js
-[^ ]*/*jquery.multiselect.css
-[^ ]*/*jquery.multiselect.js
-[^ ]*/*html5shiv-printshiv.js
-[^ ]*/*html5shiv.js
-[^ ]*/*html5shiv.min.js
-[^ ]*/*ellipsis-xbl.xml
-[^ ]*/*ui.jqgrid.css
-[^ ]*/*jquery-1.5.2.min.js
-[^ ]*/*jquery.jqGrid.min.js
-[^ ]*/*jquery.jqGrid.src.js
-[^ ]*/*grid.locale-ar.js
-[^ ]*/*grid.locale-bg.js
-[^ ]*/*grid.locale-bg1251.js
-[^ ]*/*grid.locale-cat.js
-[^ ]*/*grid.locale-cn.js
-[^ ]*/*grid.locale-cs.js
-[^ ]*/*grid.locale-da.js
-[^ ]*/*grid.locale-de.js
-[^ ]*/*grid.locale-el.js
-[^ ]*/*grid.locale-en.js
-[^ ]*/*grid.locale-es.js
-[^ ]*/*grid.locale-fa.js
-[^ ]*/*grid.locale-fi.js
-[^ ]*/*grid.locale-fr.js
-[^ ]*/*grid.locale-gl.js
-[^ ]*/*grid.locale-he.js
-[^ ]*/*grid.locale-hr.js
-[^ ]*/*grid.locale-hr1250.js
-[^ ]*/*grid.locale-hu.js
-[^ ]*/*grid.locale-is.js
-[^ ]*/*grid.locale-it.js
-[^ ]*/*grid.locale-ja.js
-[^ ]*/*grid.locale-lt.js
-[^ ]*/*grid.locale-mne.js
-[^ ]*/*grid.locale-nl.js
-[^ ]*/*grid.locale-no.js
-[^ ]*/*grid.locale-pl.js
-[^ ]*/*grid.locale-pt-br.js
-[^ ]*/*grid.locale-pt.js
-[^ ]*/*grid.locale-ro.js
-[^ ]*/*grid.locale-ru.js
-[^ ]*/*grid.locale-sk.js
-[^ ]*/*grid.locale-sr-latin.js
-[^ ]*/*grid.locale-sr.js
-[^ ]*/*grid.locale-sv.js
-[^ ]*/*grid.locale-th.js
-[^ ]*/*grid.locale-tr.js
-[^ ]*/*grid.locale-ua.js
-[^ ]*/*grid.addons.js
-[^ ]*/*grid.postext.js
-[^ ]*/*grid.setcolumns.js
-[^ ]*/*jquery.contextmenu.js
-[^ ]*/*jquery.searchFilter.js
-[^ ]*/*jquery.tablednd.js
-[^ ]*/*searchFilter.css
-[^ ]*/*ui.multiselect.css
-[^ ]*/*ui.multiselect.js
-[^ ]*/*JsonXml.js
-[^ ]*/*grid.base.js
-[^ ]*/*grid.celledit.js
-[^ ]*/*grid.common.js
-[^ ]*/*grid.custom.js
-[^ ]*/*grid.filter.js
-[^ ]*/*grid.formedit.js
-[^ ]*/*grid.grouping.js
-[^ ]*/*grid.import.js
-[^ ]*/*grid.inlinedit.js
-[^ ]*/*grid.jqueryui.js
-[^ ]*/*grid.loader.js
-[^ ]*/*grid.subgrid.js
-[^ ]*/*grid.tbltogrid.js
-[^ ]*/*grid.treegrid.js
-[^ ]*/*jqDnR.js
-[^ ]*/*jqModal.js
-[^ ]*/*jquery.fmatter.js
-[^ ]*/*ellipsis-xbl.xml
-[^ ]*/*ui.jqgrid.css
-[^ ]*/*ui.multiselect.css
-[^ ]*/*grid.locale-ar.js
-[^ ]*/*grid.locale-bg.js
-[^ ]*/*grid.locale-bg1251.js
-[^ ]*/*grid.locale-cat.js
-[^ ]*/*grid.locale-cn.js
-[^ ]*/*grid.locale-cs.js
-[^ ]*/*grid.locale-da.js
-[^ ]*/*grid.locale-de.js
-[^ ]*/*grid.locale-dk.js
-[^ ]*/*grid.locale-el.js
-[^ ]*/*grid.locale-en.js
-[^ ]*/*grid.locale-es.js
-[^ ]*/*grid.locale-fa.js
-[^ ]*/*grid.locale-fi.js
-[^ ]*/*grid.locale-fr.js
-[^ ]*/*grid.locale-gl.js
-[^ ]*/*grid.locale-he.js
-[^ ]*/*grid.locale-hr.js
-[^ ]*/*grid.locale-hr1250.js
-[^ ]*/*grid.locale-hu.js
-[^ ]*/*grid.locale-is.js
-[^ ]*/*grid.locale-it.js
-[^ ]*/*grid.locale-ja.js
-[^ ]*/*grid.locale-lt.js
-[^ ]*/*grid.locale-mne.js
-[^ ]*/*grid.locale-nl.js
-[^ ]*/*grid.locale-no.js
-[^ ]*/*grid.locale-pl.js
-[^ ]*/*grid.locale-pt-br.js
-[^ ]*/*grid.locale-pt.js
-[^ ]*/*grid.locale-ro.js
-[^ ]*/*grid.locale-ru.js
-[^ ]*/*grid.locale-sk.js
-[^ ]*/*grid.locale-sr-latin.js
-[^ ]*/*grid.locale-sr.js
-[^ ]*/*grid.locale-sv.js
-[^ ]*/*grid.locale-th.js
-[^ ]*/*grid.locale-tr.js
-[^ ]*/*grid.locale-ua.js
-[^ ]*/*index.html
-[^ ]*/*jquery-ui-1.7.3.custom.css
-[^ ]*/*jquery-1.3.2.js
-[^ ]*/*demos.css
-[^ ]*/*index.html
-[^ ]*/*collapsible.html
-[^ ]*/*custom-icons.html
-[^ ]*/*default.html
-[^ ]*/*fillspace.html
-[^ ]*/*index.html
-[^ ]*/*mouseover.html
-[^ ]*/*no-auto-height.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*alt-field.html
-[^ ]*/*buttonbar.html
-[^ ]*/*date-formats.html
-[^ ]*/*default.html
-[^ ]*/*dropdown-month-year.html
-[^ ]*/*icon-trigger.html
-[^ ]*/*index.html
-[^ ]*/*inline.html
-[^ ]*/*localization.html
-[^ ]*/*min-max.html
-[^ ]*/*multiple-calendars.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*modal-confirmation.html
-[^ ]*/*modal-form.html
-[^ ]*/*modal-message.html
-[^ ]*/*modal.html
-[^ ]*/*constrain-movement.html
-[^ ]*/*cursor-style.html
-[^ ]*/*default.html
-[^ ]*/*delay-start.html
-[^ ]*/*events.html
-[^ ]*/*handle.html
-[^ ]*/*index.html
-[^ ]*/*revert.html
-[^ ]*/*scroll.html
-[^ ]*/*snap-to.html
-[^ ]*/*sortable.html
-[^ ]*/*visual-feedback.html
-[^ ]*/*accepted-elements.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*photo-manager.html
-[^ ]*/*propagation.html
-[^ ]*/*revert.html
-[^ ]*/*visual-feedback.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*animated.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*resize.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*animate.html
-[^ ]*/*aspect-ratio.html
-[^ ]*/*constrain-area.html
-[^ ]*/*default.html
-[^ ]*/*delay-start.html
-[^ ]*/*index.html
-[^ ]*/*max-min.html
-[^ ]*/*snap-to-grid.html
-[^ ]*/*synchronous-resize.html
-[^ ]*/*visual-feedback.html
-[^ ]*/*default.html
-[^ ]*/*display-grid.html
-[^ ]*/*index.html
-[^ ]*/*serialize.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*colorpicker.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*multiple-vertical.html
-[^ ]*/*range-vertical.html
-[^ ]*/*range.html
-[^ ]*/*rangemax.html
-[^ ]*/*rangemin.html
-[^ ]*/*slider-vertical.html
-[^ ]*/*steps.html
-[^ ]*/*connect-lists-through-tabs.html
-[^ ]*/*connect-lists.html
-[^ ]*/*default.html
-[^ ]*/*delay-start.html
-[^ ]*/*display-grid.html
-[^ ]*/*empty-lists.html
-[^ ]*/*index.html
-[^ ]*/*items.html
-[^ ]*/*placeholder.html
-[^ ]*/*portlets.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*ajax.html
-[^ ]*/*collapsible.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*mouseover.html
-[^ ]*/*sortable.html
-[^ ]*/*vertical.html
-[^ ]*/*ajax/content1.html
-[^ ]*/*ajax/content2.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*default.html
-[^ ]*/*index.html
-[^ ]*/*accordion.html
-[^ ]*/*addClass.html
-[^ ]*/*animate.html
-[^ ]*/*datepicker.html
-[^ ]*/*dialog.html
-[^ ]*/*draggable.html
-[^ ]*/*droppable.html
-[^ ]*/*effect.html
-[^ ]*/*hide.html
-[^ ]*/*progressbar.html
-[^ ]*/*removeClass.html
-[^ ]*/*resizable.html
-[^ ]*/*selectable.html
-[^ ]*/*show.html
-[^ ]*/*slider.html
-[^ ]*/*sortable.html
-[^ ]*/*switchClass.html
-[^ ]*/*tabs.html
-[^ ]*/*toggle.html
-[^ ]*/*toggleClass.html
-[^ ]*/*jquery.bgiframe.js
-[^ ]*/*jquery.bgiframe.min.js
-[^ ]*/*jquery.bgiframe.pack.js
-[^ ]*/*index.html
-[^ ]*/*index.html
-[^ ]*/*jquery.cookie.js
-[^ ]*/*jquery.cookie.min.js
-[^ ]*/*jquery.cookie.pack.js
-[^ ]*/*jsdiff.js
-[^ ]*/*testrunner.js
-[^ ]*/*testsuite.css
-[^ ]*/*jquery.simulate.js
-[^ ]*/*ui.accordion.css
-[^ ]*/*ui.all.css
-[^ ]*/*ui.base.css
-[^ ]*/*ui.core.css
-[^ ]*/*ui.datepicker.css
-[^ ]*/*ui.dialog.css
-[^ ]*/*ui.progressbar.css
-[^ ]*/*ui.resizable.css
-[^ ]*/*ui.slider.css
-[^ ]*/*ui.tabs.css
-[^ ]*/*ui.theme.css
-[^ ]*/*jquery-ui-1.7.3.custom.css
-[^ ]*/*ui.accordion.css
-[^ ]*/*ui.all.css
-[^ ]*/*ui.base.css
-[^ ]*/*ui.core.css
-[^ ]*/*ui.datepicker.css
-[^ ]*/*ui.dialog.css
-[^ ]*/*ui.progressbar.css
-[^ ]*/*ui.resizable.css
-[^ ]*/*ui.slider.css
-[^ ]*/*ui.tabs.css
-[^ ]*/*ui.theme.css
-[^ ]*/*effects.blind.js
-[^ ]*/*effects.bounce.js
-[^ ]*/*effects.clip.js
-[^ ]*/*effects.core.js
-[^ ]*/*effects.drop.js
-[^ ]*/*effects.explode.js
-[^ ]*/*effects.fold.js
-[^ ]*/*effects.highlight.js
-[^ ]*/*effects.pulsate.js
-[^ ]*/*effects.scale.js
-[^ ]*/*effects.shake.js
-[^ ]*/*effects.slide.js
-[^ ]*/*effects.transfer.js
-[^ ]*/*jquery-ui-1.7.3.custom.js
-[^ ]*/*ui.accordion.js
-[^ ]*/*ui.core.js
-[^ ]*/*ui.datepicker.js
-[^ ]*/*ui.dialog.js
-[^ ]*/*ui.draggable.js
-[^ ]*/*ui.droppable.js
-[^ ]*/*ui.progressbar.js
-[^ ]*/*ui.resizable.js
-[^ ]*/*ui.selectable.js
-[^ ]*/*ui.slider.js
-[^ ]*/*ui.sortable.js
-[^ ]*/*ui.tabs.js
-[^ ]*/*jquery-ui-i18n.js
-[^ ]*/*ui.datepicker-ar.js
-[^ ]*/*ui.datepicker-bg.js
-[^ ]*/*ui.datepicker-ca.js
-[^ ]*/*ui.datepicker-cs.js
-[^ ]*/*ui.datepicker-da.js
-[^ ]*/*ui.datepicker-de.js
-[^ ]*/*ui.datepicker-el.js
-[^ ]*/*ui.datepicker-eo.js
-[^ ]*/*ui.datepicker-es.js
-[^ ]*/*ui.datepicker-fa.js
-[^ ]*/*ui.datepicker-fi.js
-[^ ]*/*ui.datepicker-fr.js
-[^ ]*/*ui.datepicker-he.js
-[^ ]*/*ui.datepicker-hr.js
-[^ ]*/*ui.datepicker-hu.js
-[^ ]*/*ui.datepicker-hy.js
-[^ ]*/*ui.datepicker-id.js
-[^ ]*/*ui.datepicker-is.js
-[^ ]*/*ui.datepicker-it.js
-[^ ]*/*ui.datepicker-ja.js
-[^ ]*/*ui.datepicker-ko.js
-[^ ]*/*ui.datepicker-lt.js
-[^ ]*/*ui.datepicker-lv.js
-[^ ]*/*ui.datepicker-ms.js
-[^ ]*/*ui.datepicker-nl.js
-[^ ]*/*ui.datepicker-no.js
-[^ ]*/*ui.datepicker-pl.js
-[^ ]*/*ui.datepicker-pt-BR.js
-[^ ]*/*ui.datepicker-ro.js
-[^ ]*/*ui.datepicker-ru.js
-[^ ]*/*ui.datepicker-sk.js
-[^ ]*/*ui.datepicker-sl.js
-[^ ]*/*ui.datepicker-sq.js
-[^ ]*/*ui.datepicker-sr-SR.js
-[^ ]*/*ui.datepicker-sr.js
-[^ ]*/*ui.datepicker-sv.js
-[^ ]*/*ui.datepicker-th.js
-[^ ]*/*ui.datepicker-tr.js
-[^ ]*/*ui.datepicker-uk.js
-[^ ]*/*ui.datepicker-zh-CN.js
-[^ ]*/*ui.datepicker-zh-TW.js
-[^ ]*/*effects.blind.min.js
-[^ ]*/*effects.bounce.min.js
-[^ ]*/*effects.clip.min.js
-[^ ]*/*effects.core.min.js
-[^ ]*/*effects.drop.min.js
-[^ ]*/*effects.explode.min.js
-[^ ]*/*effects.fold.min.js
-[^ ]*/*effects.highlight.min.js
-[^ ]*/*effects.pulsate.min.js
-[^ ]*/*effects.scale.min.js
-[^ ]*/*effects.shake.min.js
-[^ ]*/*effects.slide.min.js
-[^ ]*/*effects.transfer.min.js
-[^ ]*/*ui.accordion.min.js
-[^ ]*/*ui.core.min.js
-[^ ]*/*ui.datepicker.min.js
-[^ ]*/*ui.dialog.min.js
-[^ ]*/*ui.draggable.min.js
-[^ ]*/*ui.droppable.min.js
-[^ ]*/*ui.progressbar.min.js
-[^ ]*/*ui.resizable.min.js
-[^ ]*/*ui.selectable.min.js
-[^ ]*/*ui.slider.min.js
-[^ ]*/*ui.sortable.min.js
-[^ ]*/*ui.tabs.min.js
-[^ ]*/*jquery-1.3.2.min.js
-[^ ]*/*jquery-ui-1.7.3.custom.min.js
-[^ ]*/*g.pie.min.js
-[^ ]*/*g.raphael.min.js
-[^ ]*/*cluster_configuration.json.nohbase
-[^ ]*/*iconic_fill.eot
-[^ ]*/*iconic_stroke.eot
-[^ ]*/*content1.html
-[^ ]*/*content2.html
-[^ ]*/*raphael.js