AMBARI-1126. Change SUSE lzo dependency to only lzo-devel. (nate cole via mahadev)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/trunk@1432284 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/CHANGES.txt b/CHANGES.txt
index fba7756..f5ca01b 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -21,6 +21,8 @@
 
  BUG FIXES
 
+ AMBARI-1126. Change SUSE lzo dependency to only lzo-devel. (nate cole via
+ mahadev)
 
 AMBARI-666 branch (unreleased changes)
 
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
index 01eb87c..6aa9040 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
+++ b/ambari-agent/src/main/puppet/modules/hdp-ganglia/files/stopGmond.sh
@@ -33,7 +33,7 @@
     # Only go ahead with the termination if we could find a running PID.
     if [ -n "${gmondRunningPid}" ]
     then
-      kill -HUP ${gmondRunningPid};
+      kill ${gmondRunningPid};
       echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
     fi
 }
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
index 394c64c..27334ac 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
@@ -118,7 +118,7 @@
   }
   if ($ensure == 'running' or $ensure == 'stopped') {
     hdp::exec { "hdp-gmond service" :
-      command => "$command",
+      command => $command,
       path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
     }
   }
diff --git a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
index eef7434..9adeb15 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
@@ -29,7 +29,8 @@
   } elsif ($service_state == 'uninstalled') {
 
    class { 'hdp-ganglia::server::packages':
-      ensure => 'uninstalled'
+      ensure => 'uninstalled',
+      service_state => $service_state
    }
 
    class { 'hdp-ganglia::server::files':
@@ -41,7 +42,10 @@
     service_state => $service_state
   }
 
-  class { 'hdp-ganglia::server::packages': }
+  class { 'hdp-ganglia::server::packages':
+    ensure => 'present',
+    service_state => $service_state
+  }
 
   class { 'hdp-ganglia::config': 
     ganglia_server_host => $hdp::params::host_address,
@@ -84,23 +88,36 @@
 }
 
 class hdp-ganglia::server::packages(
-  $ensure = present 
+  $ensure = present,
+  $service_state = 'installed_and_configured'
 )
 {
   hdp::package { ['ganglia-server','ganglia-gweb','ganglia-hdp-gweb-addons']: 
     ensure      => $ensure,
-    java_needed => false  
+    java_needed => false,
+    require => Hdp::Package ['rrdtool-python']
   }
 
-  hdp::package { ['rrdtool']:
-        ensure      => 'absent',
-        java_needed => false,
-        before => Hdp::Package ['rrdtool-python']
+  # Removing conflicting packages only once to work around "/bin/rpm -e absent-absent-absent.absent" bug (BUG-2881)
+  if ($service_state == 'installed_and_configured' and $hdp::params::hdp_os_type == 'centos5') {
+    # Remove conflicting 32bit package
+    hdp::package { ['rrdtool-devel']:
+      ensure      => 'absent',
+      java_needed => false,
+      before => Hdp::Package ['rrdtool']
+    }
+
+    # Remove conflicting 32bit package
+    hdp::package { ['rrdtool']:
+      ensure      => 'absent',
+      java_needed => false,
+      before => Hdp::Package ['rrdtool-python']
+    }
   }
 
   hdp::package { ['rrdtool-python']:
-      ensure      => $ensure,
-      java_needed => false
+    ensure      => $ensure,
+    java_needed => false
   }
 
 }
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
index de4ba62..1e34e8f 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
@@ -91,10 +91,11 @@
 define hdp-hadoop::datanode::create_data_dirs($service_state)
 {
   $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create_ignore_failure { $dirs :
+  hdp::directory_recursive_create { $dirs :
     owner => $hdp-hadoop::params::hdfs_user,
     mode => '0750',
     service_state => $service_state,
     force => true
   }
+
 }
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
index 54c7117..5da23ce 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
@@ -50,12 +50,12 @@
       module => 'hdp-hadoop',
       configuration => $configuration['mapred-queue-acls'],
       owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::hadoop_user_group
+      group => $hdp::params::user_group
     }
   } else { # Manually overriding ownership of file installed by hadoop package
     file { "${hdp-hadoop::params::conf_dir}/mapred-queue-acls.xml":
       owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::hadoop_user_group
+      group => $hdp::params::user_group
     }
   }
   
@@ -66,7 +66,7 @@
       module => 'hdp-hadoop',
       configuration => $configuration['hadoop-policy'],
       owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::hadoop_user_group
+      group => $hdp::params::user_group
     }
   }
 
@@ -77,7 +77,7 @@
         module => 'hdp-hadoop',
         configuration => $configuration['core-site'],
         owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::hadoop_user_group
+        group => $hdp::params::user_group
       }
     }
 
@@ -88,7 +88,7 @@
       module => 'hdp-hadoop',
       configuration => $configuration['mapred-site'],
       owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::hadoop_user_group
+      group => $hdp::params::user_group
     }
   }
   
@@ -99,7 +99,7 @@
       module => 'hdp-hadoop',
       configuration => $configuration['capacity-scheduler'],
       owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::hadoop_user_group
+      group => $hdp::params::user_group
     }
   }
 
@@ -110,7 +110,7 @@
       module => 'hdp-hadoop',
       configuration => $configuration['hdfs-site'],
       owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::hadoop_user_group
+      group => $hdp::params::user_group
     }
   }
 
@@ -165,7 +165,7 @@
     hdp::user{ $hdfs_user:}
     hdp::user { $mapred_user:}
 
-    $logdirprefix = $hdp-hadoop::params::hadoop_logdirprefix
+    $logdirprefix = $hdp-hadoop::params::hdfs_log_dir_prefix
     hdp::directory_recursive_create { $logdirprefix: 
         owner => 'root'
     }
@@ -178,7 +178,7 @@
     if ($hdp::params::security_enabled == true) {
       file { "${hdp::params::hadoop_bin}/task-controller":
         owner   => 'root',
-        group   => $hdp::params::hadoop_user_group,
+        group   => $hdp::params::user_group,
         mode    => '6050',
         require => Hdp-hadoop::Package['hadoop'],
         before  => Anchor['hdp-hadoop::end']
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
index b79dd3a..33046b8 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
@@ -56,7 +56,7 @@
 
   $hadoop_heapsize = hdp_default("hadoop/hadoop-env/hadoop_heapsize","1024m")
 
-  $hadoop_logdirprefix = hdp_default("hadoop/hadoop-env/hadoop_logdirprefix","/var/log/hadoop")
+  $hdfs_log_dir_prefix = hdp_default("hadoop/hadoop-env/hdfs_log_dir_prefix","/var/log/hadoop")
 
   $hadoop_piddirprefix = hdp_default("hadoop/hadoop-env/hadoop_piddirprefix","/var/run/hadoop")
   $run_dir = $hadoop_piddirprefix
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
index b21303a..0ce7145 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
@@ -45,7 +45,7 @@
     $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
   } 
 
-  $log_dir = "${hdp-hadoop::params::hadoop_logdirprefix}/${user}"
+  $log_dir = "${hdp-hadoop::params::hdfs_log_dir_prefix}/${user}"
   $hadoop_daemon = "${hdp::params::hadoop_bin}/hadoop-daemon.sh"
    
   $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
index cc293e2..e0cea3f 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
@@ -40,14 +40,14 @@
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
@@ -60,11 +60,11 @@
 export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
 
 # Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
 
 
 # Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$HADOOP_SECURE_DN_USER
+export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
 
 # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
 # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
index 24507fb..d7dffef 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
@@ -17,4 +17,4 @@
 # */
 mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
 mapreduce.tasktracker.group=hadoop
-hadoop.log.dir=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>
+hadoop.log.dir=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>
diff --git a/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh b/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
new file mode 100644
index 0000000..710ce58
--- /dev/null
+++ b/ambari-agent/src/main/puppet/modules/hdp-mysql/files/addMysqlUser.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop
\ No newline at end of file
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
index a9dfdec..f765e02 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/config.pp
@@ -22,6 +22,7 @@
 {
 
   $host_cfg = $hdp-nagios::params::nagios_host_cfg
+  $nagios_lookup_daemon_str = $hdp::params::nagios_lookup_daemon_strs[$hdp::params::hdp_os_type]
   
   hdp-nagios::server::configfile { 'nagios.cfg': conf_dir => $hdp-nagios::params::conf_dir }
   hdp-nagios::server::configfile { 'resource.cfg': conf_dir => $hdp-nagios::params::conf_dir }
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
index 2de50f1..aedbba6 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
+++ b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-commands.cfg.erb
@@ -20,11 +20,13 @@
 #
 #
 
+<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
 # 'check_cpu' check remote cpu load
 define command {
         command_name    check_cpu
         command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
        }
+<% end %>
 
 # Check data node storage full 
 define command {
diff --git a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
index 3951b4f..6c46c73 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
+++ b/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
@@ -35,7 +35,7 @@
         use                     hadoop-service
         service_description     NAGIOS::Nagios status log staleness
         servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!/usr/bin/nagios
+        check_command           check_nagios!10!/var/nagios/status.dat!<%=nagios_lookup_daemon_str%>
         normal_check_interval   5
         retry_check_interval    0.5
         max_check_attempts      2
@@ -201,7 +201,7 @@
         retry_check_interval    0.5
         max_check_attempts      3
 }
-
+<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
 define service {        
         hostgroup_name          namenode        
         use                     hadoop-service
@@ -212,6 +212,7 @@
         retry_check_interval    2 
         max_check_attempts      5
 }
+<% end %>
 
 define service {
         hostgroup_name          namenode
@@ -281,7 +282,7 @@
         retry_check_interval    1
         max_check_attempts      3
 }
-
+ <% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
@@ -292,6 +293,7 @@
         retry_check_interval    2 
         max_check_attempts      5
 }
+<% end %>
 
 define service {
         hostgroup_name          jobtracker
@@ -391,7 +393,7 @@
         retry_check_interval    1
         max_check_attempts      3
 }
-
+<% if scope.function_hdp_template_var("hdp_os_type") != "suse"%>
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
@@ -402,7 +404,7 @@
         retry_check_interval    2 
         max_check_attempts      5
 }
-
+<% end %>
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
diff --git a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
index 94a4a2b..d46f909 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
@@ -40,21 +40,19 @@
     $lzo_jar_suffix = ""
   }
 
+  $cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
+  $cmd2 =  "cd /usr/lib/oozie && mkdir -p ${oozie_tmp}"
+  $cmd3 =  "cd /usr/lib/oozie && chown ${user}:hadoop ${oozie_tmp}"    
+  $cmd4 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path $lzo_jar_suffix"
+  $cmd5 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; echo 0"
+  $cmd6 =  "hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/${user}/share"
+  $cmd7 = "/usr/lib/oozie/bin/oozie-start.sh"
 
-  if ($ensure == 'running') {
-
-    $cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-    $cmd2 =  "cd /usr/lib/oozie && mkdir -p ${oozie_tmp}"
-    $cmd3 =  "cd /usr/lib/oozie && chown ${user}:hadoop ${oozie_tmp}"
-    
-    $cmd4 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path $lzo_jar_suffix"
-    $cmd5 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/${user}/share"
-    $cmd6 =  "/usr/lib/oozie/bin/oozie-start.sh"
-
-
+  if ($ensure == 'installed_and_configured') {
     $sh_cmds = [$cmd1, $cmd2, $cmd3]
-    $user_cmds = [$cmd4, $cmd5, $cmd6]
-	
+    $user_cmds = [$cmd4, $cmd5]
+  } elsif ($ensure == 'running') {   
+    $start_cmd = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-start.sh'"
     $no_op_test = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
   } elsif ($ensure == 'stopped') {
     $stop_cmd  = "su - ${user} -c  'cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-stop.sh'"
@@ -71,16 +69,20 @@
 
   anchor{'hdp-oozie::service::begin':} -> Hdp-oozie::Service::Directory<||> -> anchor{'hdp-oozie::service::end':}
   
-  if ($ensure == 'running') {
+  if ($ensure == 'installed_and_configured') {
     hdp-oozie::service::exec_sh{$sh_cmds:}
     hdp-oozie::service::exec_user{$user_cmds:}
-    Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] ->Hdp-oozie::Service::Exec_user[$cmd5] -> Hdp-oozie::Service::Exec_user[$cmd6] -> Anchor['hdp-oozie::service::end']
+    Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] ->Hdp-oozie::Service::Exec_user[$cmd5] -> Anchor['hdp-oozie::service::end']
+  } elsif ($ensure == 'running') {
+    $user_cmds = [$cmd6, $cmd7]
+    hdp-oozie::service::exec_user{$user_cmds:}
+    Hdp-oozie::Service::Exec_user[$cmd6] -> Hdp-oozie::Service::Exec_user[$cmd7] -> Anchor['hdp-oozie::service::end']
   } elsif ($ensure == 'stopped') {
     hdp::exec { "exec $stop_cmd":
       command => $stop_cmd,
       unless  => $no_op_test,
       initial_wait => $initial_wait
-  }
+   }
   }
 }
 
diff --git a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
index 200edd5..e872075 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
@@ -84,12 +84,12 @@
 #    mode  => '755',
 #    dest_dir => '/apps/templeton/ugi.jar'
 #  }
-#  hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
-#   service_state => $service_state,
-#   owner => $hdp-templeton::params::templeton_user,
-#   mode  => '755',
-#   dest_dir => '/apps/templeton/hadoop-streaming.jar'
-# }
+  hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
+   service_state => $service_state,
+   owner => $hdp-templeton::params::templeton_user,
+   mode  => '755',
+   dest_dir => '/apps/webhcat/hadoop-streaming.jar'
+  }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::pig_tar_name} instead
   hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/pig.tar.gz' :
     service_state => $service_state,
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
index cead9e3..8cd4c7d 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/configfile.pp
@@ -22,7 +22,7 @@
   $component,
   $conf_dir = undef, #if this is undef then name is of form conf_dir/file_name
   $owner = undef, 
-  $group = $hdp::params::hadoop_user_group,
+  $group = $hdp::params::user_group,
   $mode = undef,
   $size = 64, #32 or 64 bit (used to pick appropriate java_home)
   $template_tag = undef,
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
index 9333767..06cbe6e 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
@@ -29,15 +29,15 @@
 
   Exec { logoutput => 'on_failure' }
 
-  group { $hdp::params::hadoop_user_group :
+  group { $hdp::params::user_group :
     ensure => present
   }
 
   #TODO: think not needed and also there seems to be a puppet bug around this and ldap
   hdp::user { $hdp::params::hadoop_user:
-    gid => $hdp::params::hadoop_user_group
+    gid => $hdp::params::user_group
   }
-  Group[$hdp::params::hadoop_user_group] -> Hdp::User[$hdp::params::hadoop_user] 
+  Group[$hdp::params::user_group] -> Hdp::User[$hdp::params::hadoop_user] 
   class { 'hdp::snmp': service_state => 'running'}
 
   class { 'hdp::create_smoke_user': }
@@ -141,7 +141,7 @@
 }
 
 define hdp::user(
-  $gid = $hdp::params::hadoop_user_group,
+  $gid = $hdp::params::user_group,
   $just_validate = undef
 )
 {
@@ -163,7 +163,7 @@
     user { $name:
       ensure     => present,
       managehome => true,
-      #gid        => $gid, #TODO either remove this to support LDAP env or fix it
+      gid        => $gid, #TODO either remove this to support LDAP env or fix it
       shell      => '/bin/bash'
     }
   }
@@ -171,7 +171,7 @@
      
 define hdp::directory(
   $owner = $hdp::params::hadoop_user,
-  $group = $hdp::params::hadoop_user_group,
+  $group = $hdp::params::user_group,
   $mode  = undef,
   $ensure = directory,
   $force = undef,
@@ -199,7 +199,7 @@
 #TODO: check on -R flag and use of recurse
 define hdp::directory_recursive_create(
   $owner = $hdp::params::hadoop_user,
-  $group = $hdp::params::hadoop_user_group,
+  $group = $hdp::params::user_group,
   $mode = undef,
   $context_tag = undef,
   $ensure = directory,
@@ -226,7 +226,7 @@
 
 define hdp::directory_recursive_create_ignore_failure(
   $owner = $hdp::params::hadoop_user,
-  $group = $hdp::params::hadoop_user_group,
+  $group = $hdp::params::user_group,
   $mode = undef,
   $context_tag = undef,
   $ensure = directory,
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
index 37a5eef..fe5a764 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/lzo/package.pp
@@ -22,14 +22,7 @@
 {
   $size = $name
 
-  case $hdp::params::hdp_os_type {
-    centos6, redhat6: {
-      $pkg_type = 'lzo-rhel6'
-    }
-    default: {
-      $pkg_type = 'lzo-rhel5'
-    }
-  }
+  $pkg_type = "lzo"
 
   hdp::package {"lzo ${size}":
     package_type  => "${pkg_type}",
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
index 6d72e9c..9414237 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
@@ -134,13 +134,13 @@
   $smokeuser = hdp_default("smokeuser","ambari_qa")
   $smoke_user_group = hdp_default("smoke_user_group","users")
 
-  #because of Puppet user resource issue make sure that $hadoop_user is different from hadoop_user_group
+  #because of Puppet user resource issue make sure that $hadoop_user is different from user_group
   if ($security_enabled == true) {
     $hadoop_user = "root"
   } else {
     $hadoop_user = hdp_default("hadoop_user", "hadoop_deploy")
   }
-  $hadoop_user_group = hdp_default("hadoop_user_group","hadoop")
+  $user_group = hdp_default("user_group","hadoop")
 
   $ganglia_enabled = hdp_default("ganglia_enabled",true) 
 
@@ -305,15 +305,25 @@
     rrdtool-python => {
       64 => ['python-rrdtool.x86_64']
     },
+    # The 32bit version of package rrdtool-devel is removed on centos 5/6 to prevent conflict (BUG-2881)
+    rrdtool-devel => {
+      64 => {
+        'ALL' => 'rrdtool-devel.i686',
+        'centos6' => 'rrdtool-devel.i686',
+        'centos5' => 'rrdtool-devel.i386',
+        'redhat6' => 'rrdtool-devel.i686',
+        'redhat5' => 'rrdtool-devel.i386'
+      }
+    },
     # The 32bit version of package rrdtool is removed on centos 5/6 to prevent conflict ( BUG-2408)
     rrdtool => {
-          64 => {
-            'ALL' => 'rrdtool.i686',
-            'centos6' => 'rrdtool.i686',
-            'centos5' => 'rrdtool.i386',
-            'redhat6' => 'rrdtool.i686',
-            'redhat5' => 'rrdtool.i386'
-            }
+      64 => {
+        'ALL' => 'rrdtool.i686',
+        'centos6' => 'rrdtool.i686',
+        'centos5' => 'rrdtool.i386',
+        'redhat6' => 'rrdtool.i686',
+        'redhat5' => 'rrdtool.i386'
+       }
     },
     ambari-log4j => {
       64 => ['ambari-log4j']
@@ -428,6 +438,11 @@
       64 => {'ALL' =>['hadoop','hadoop-libhdfs','hadoop-native','hadoop-pipes','hadoop-sbin','hadoop-lzo', 'hadoop-lzo-native']}
     },
 
+    lzo => {
+      'ALL' => {'ALL' => ['lzo', 'lzo.i686', 'lzo-devel', 'lzo-devel.i686'],
+                suse => ['lzo-devel']},
+    },
+
     glibc=> {
       'ALL' => {'ALL' => ['glibc','glibc.i686'],
                 suse => ['glibc']},
@@ -523,6 +538,17 @@
       64 => {'ALL' =>'python-rrdtool.x86_64'}
     },
 
+    # The 32bit version of package rrdtool-devel is removed on centos 5/6 to prevent a conflict (BUG-2881)
+    rrdtool-devel => {
+      64 => {
+        'ALL' => 'rrdtool-devel.i686',
+        'centos6' => 'rrdtool-devel.i686',
+        'centos5' => 'rrdtool-devel.i386',
+        'redhat6' => 'rrdtool-devel.i686',
+        'redhat5' => 'rrdtool-devel.i386'
+        }
+    },
+
     # The 32bit version of package rrdtool is removed on centos 5/6 to prevent conflict ( BUG-2408)
     rrdtool => {
       64 => {
@@ -562,6 +588,15 @@
     redhat6 => '/var/www/cgi-bin',
     redhat5 => '/var/www/cgi-bin'
   }
+  
+  $nagios_lookup_daemon_strs = 
+  {
+    suse => '/usr/sbin/nagios',
+    centos6 => '/usr/bin/nagios',
+    centos5 => '/usr/bin/nagios',
+    redhat6 => '/usr/bin/nagios',
+    redhat5 => '/usr/bin/nagios'
+  }
 
 
 
diff --git a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
index 4c24325..918f42b 100644
--- a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
+++ b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
@@ -23,6 +23,7 @@
 import logging.handlers
 import sys
 import os
+import re
 
 logger = logging.getLogger()
 
@@ -72,6 +73,9 @@
       self.serToPidDict = dict(self.get_pair(line) for line in fd)
 
   def getIsLive(self, pidPath):
+    if not pidPath:
+      return False
+
     isLive = False
     pid = -1
     try:
@@ -91,15 +95,18 @@
 
   def getStatus(self, serviceCode):
     try:
-      pidName = self.serToPidDict[serviceCode]
-      logger.info( 'pidName: ' + pidName)
+      pidPath = None
+      pidPattern = self.serToPidDict[serviceCode]
+      logger.info( 'pidPattern: ' + pidPattern)
     except KeyError as e:
       logger.warn('There is no mapping for ' + serviceCode)
       return None
     try:
-      pidPath = self.pidFilesDict[pidName]
-      logger.info('pidPath: ' + pidPath)
-      result = self.getIsLive(self.pidFilesDict[pidName])
+      for pidFile in self.pidFilesDict.keys():
+        if re.match(pidPattern, pidFile):
+          pidPath = self.pidFilesDict[pidFile]          
+      logger.info('pidPath: ' + str(pidPath))
+      result = self.getIsLive(pidPath)
       return result
     except KeyError:
       logger.info('Pid file was not found')
diff --git a/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict b/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
index 0a2fda5..8a56640 100644
--- a/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
+++ b/ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-NAMENODE=hadoop-hdfs-namenode.pid
-SECONDARY_NAMENODE=hadoop-hdfs-secondarynamenode.pid
-DATANODE=hadoop-hdfs-datanode.pid
-JOBTRACKER=hadoop-mapred-jobtracker.pid
-TASKTRACKER=hadoop-mapred-tasktracker.pid
+NAMENODE=hadoop-[a-z_]+-namenode.pid$
+SECONDARY_NAMENODE=hadoop-[a-z_]+-secondarynamenode.pid$
+DATANODE=hadoop-[a-z_]+-datanode.pid$
+JOBTRACKER=hadoop-[a-z_]+-jobtracker.pid$
+TASKTRACKER=hadoop-[a-z_]+-tasktracker.pid$
 OOZIE_SERVER=oozie.pid
 ZOOKEEPER_SERVER=zookeeper_server.pid
 TEMPLETON_SERVER=templeton.pid
diff --git a/ambari-server/src/main/java/org/apache/ambari/eventdb/model/DataTable.java b/ambari-server/src/main/java/org/apache/ambari/eventdb/model/DataTable.java
new file mode 100644
index 0000000..bbd6a06
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/eventdb/model/DataTable.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.eventdb.model;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.ambari.eventdb.model.Workflows.WorkflowDBEntry;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class DataTable {
+  int sEcho;
+  int iTotalRecords;
+  int iTotalDisplayRecords;
+  List<WorkflowDBEntry> aaData;
+  Summary summary;
+  
+  @XmlRootElement
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class Summary {
+    public static enum SummaryFields {
+      numRows,
+      avgJobs,
+      minJobs,
+      maxJobs,
+      avgInput,
+      minInput,
+      maxInput,
+      avgOutput,
+      minOutput,
+      maxOutput,
+      avgDuration,
+      minDuration,
+      maxDuration,
+      youngest,
+      oldest;
+      
+      public int getInt(ResultSet rs) throws SQLException {
+        return rs.getInt(this.toString());
+      }
+      
+      public long getLong(ResultSet rs) throws SQLException {
+        return rs.getLong(this.toString());
+      }
+      
+      public double getDouble(ResultSet rs) throws SQLException {
+        return rs.getDouble(this.toString());
+      }
+    }
+    
+    int numRows;
+    AvgData jobs;
+    AvgData input;
+    AvgData output;
+    AvgData duration;
+    Times times;
+    
+    public int getNumRows() {
+      return numRows;
+    }
+    
+    public void setNumRows(int numRows) {
+      this.numRows = numRows;
+    }
+    
+    public AvgData getJobs() {
+      return jobs;
+    }
+    
+    public void setJobs(AvgData jobs) {
+      this.jobs = jobs;
+    }
+    
+    public AvgData getInput() {
+      return input;
+    }
+    
+    public void setInput(AvgData input) {
+      this.input = input;
+    }
+    
+    public AvgData getOutput() {
+      return output;
+    }
+    
+    public void setOutput(AvgData output) {
+      this.output = output;
+    }
+    
+    public AvgData getDuration() {
+      return duration;
+    }
+    
+    public void setDuration(AvgData duration) {
+      this.duration = duration;
+    }
+    
+    public Times getTimes() {
+      return times;
+    }
+    
+    public void setTimes(Times times) {
+      this.times = times;
+    }
+  }
+  
+  @XmlRootElement
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class AvgData {
+    double avg;
+    long min;
+    long max;
+    
+    public double getAvg() {
+      return avg;
+    }
+    
+    public void setAvg(double avg) {
+      this.avg = avg;
+    }
+    
+    public long getMin() {
+      return min;
+    }
+    
+    public void setMin(long min) {
+      this.min = min;
+    }
+    
+    public long getMax() {
+      return max;
+    }
+    
+    public void setMax(long max) {
+      this.max = max;
+    }
+  }
+  
+  @XmlRootElement
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class Times {
+    long oldest;
+    long youngest;
+    
+    public long getOldest() {
+      return oldest;
+    }
+    
+    public void setOldest(long oldest) {
+      this.oldest = oldest;
+    }
+    
+    public long getYoungest() {
+      return youngest;
+    }
+    
+    public void setYoungest(long youngest) {
+      this.youngest = youngest;
+    }
+  }
+  
+  public DataTable() {}
+  
+  public int getsEcho() {
+    return sEcho;
+  }
+  
+  public void setsEcho(int sEcho) {
+    this.sEcho = sEcho;
+  }
+  
+  public int getiTotalRecords() {
+    return iTotalRecords;
+  }
+  
+  public void setiTotalRecords(int iTotalRecords) {
+    this.iTotalRecords = iTotalRecords;
+  }
+  
+  public int getiTotalDisplayRecords() {
+    return iTotalDisplayRecords;
+  }
+  
+  public void setiTotalDisplayRecords(int iTotalDisplayRecords) {
+    this.iTotalDisplayRecords = iTotalDisplayRecords;
+  }
+  
+  public List<WorkflowDBEntry> getAaData() {
+    return aaData;
+  }
+  
+  public void setAaData(List<WorkflowDBEntry> aaData) {
+    this.aaData = aaData;
+  }
+  
+  public Summary getSummary() {
+    return summary;
+  }
+  
+  public void setSummary(Summary summary) {
+    this.summary = summary;
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
index ac666b3..b6cd38c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/QueryImpl.java
@@ -99,7 +99,7 @@
 
   @Override
   //todo: consider requiring a path and a property.  For categories the property name '*' could be used.
-  public void addProperty(String category, String property, TemporalInfo temporalInfo) {
+  public void addProperty(String category, String property, TemporalInfo temporalInfo) {    
     if (category == null && property.equals("*")) {
       // wildcard
       addAllProperties(temporalInfo);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
index aec86c2..32adaa5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/BaseRequest.java
@@ -116,6 +116,12 @@
 
     Set<BasePredicate> setPredicates = new HashSet<BasePredicate>();
     for (String outerToken : tokens) {
+      if (outerToken.startsWith("_=")) {
+        // NOTE: This is to enable the UI to pass a _= parameter for a unique query
+        // string even though the backend doesn't need it.
+        continue;
+      }
+      
       if (outerToken != null &&  !outerToken.startsWith("fields")) {
         setPredicates.add(outerToken.contains("|") ?
             handleOrPredicate(outerToken) : createPredicate(outerToken));
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 091b586..20cb3d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -2033,8 +2033,12 @@
               !newState.isValidClientComponentState()) {
             continue;
           }
+          /**
+           * This is a hack for now wherein we don't fail if the
+           * sch is in the INSTALL_FAILED state
+           */
           if (!isValidStateTransition(oldSchState, newState)) {
-            throw new AmbariException("Invalid transition for"
+            String error = "Invalid transition for"
                 + " servicecomponenthost"
                 + ", clusterName=" + cluster.getClusterName()
                 + ", clusterId=" + cluster.getClusterId()
@@ -2042,7 +2046,17 @@
                 + ", componentName=" + sch.getServiceComponentName()
                 + ", hostname=" + sch.getHostName()
                 + ", currentState=" + oldSchState
-                + ", newDesiredState=" + newState);
+                + ", newDesiredState=" + newState;
+            StackId sid = cluster.getDesiredStackVersion();
+            
+            if ( ambariMetaInfo.getComponentCategory(
+                sid.getStackName(), sid.getStackVersion(), sc.getServiceName(),
+                sch.getServiceComponentName()).isMaster()) {
+              throw new AmbariException(error);
+            } else {
+              LOG.warn("Ignoring: " + error);
+              continue;
+            }
           }
           if (!changedScHosts.containsKey(sc.getName())) {
             changedScHosts.put(sc.getName(),
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index 45b1a38..2f8a791 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -135,10 +135,12 @@
           ClassPathXmlApplicationContext(contextLocations, parentSpringAppContext);
       //setting ambari web context
 
-      ServletContextHandler root = new ServletContextHandler(server, CONTEXT_PATH, 
-          ServletContextHandler.NO_SECURITY | ServletContextHandler.SECURITY |
+      ServletContextHandler root = new ServletContextHandler(server, CONTEXT_PATH,
           ServletContextHandler.SECURITY | ServletContextHandler.SESSIONS);
 
+      //Changing session cookie name to avoid conflicts
+      root.getSessionHandler().getSessionManager().setSessionCookie("AMBARISESSIONID");
+
       GenericWebApplicationContext springWebAppContext = new GenericWebApplicationContext();
       springWebAppContext.setServletContext(root.getServletContext());
       springWebAppContext.setParent(springAppContext);
@@ -317,6 +319,7 @@
        * Start the server after controller state is recovered.
        */
       server.start();
+
       serverForAgent.start();
       LOG.info("********* Started Server **********");
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
index 2f1a2b4..9f66f4c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
@@ -110,6 +110,8 @@
         RoleCommand.START);
     addDependency(Role.HIVE_METASTORE, RoleCommand.START, Role.MYSQL_SERVER,
         RoleCommand.START);
+    addDependency(Role.HIVE_SERVER, RoleCommand.START, Role.MYSQL_SERVER,
+        RoleCommand.START);
 
     // Service checks
     addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.NAMENODE,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java b/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
index 125af7f..349b610 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/CertificateManager.java
@@ -181,7 +181,7 @@
    * Adds agent certificate to server keystore
    * @return string with agent signed certificate content
    */
-  public SignCertResponse signAgentCrt(String agentHostname, String agentCrtReqContent, String passphraseAgent) {
+  public synchronized SignCertResponse signAgentCrt(String agentHostname, String agentCrtReqContent, String passphraseAgent) {
     SignCertResponse response = new SignCertResponse();
     LOG.info("Signing of agent certificate");
     LOG.info("Verifying passphrase");
diff --git a/ambari-server/src/main/python/setupAgent.py b/ambari-server/src/main/python/setupAgent.py
index 34552f1..14e12a4 100644
--- a/ambari-server/src/main/python/setupAgent.py
+++ b/ambari-server/src/main/python/setupAgent.py
@@ -48,14 +48,6 @@
       return True
   return False
 
-def installPreReqSuse():
-  """ required for ruby deps """
-  # remove once in the repo
-  zypperCommand = ["zypper", "install", "-y",
-    "http://download.opensuse.org/repositories/home:/eclipseagent:/puppet/SLE_11_SP1/x86_64/ruby-augeas-0.4.1-26.3.x86_64.rpm",
-    "http://download.opensuse.org/repositories/home:/eclipseagent:/puppet/SLE_11_SP1/x86_64/ruby-shadow-1.4.1-2.2.x86_64.rpm"]
-  return execOsCommand(zypperCommand)
-
 def installAgentSuse():
   """ Run zypper install and make sure the agent install alright """
   zypperCommand = ["zypper", "install", "-y", "ambari-agent"]
@@ -112,7 +104,6 @@
   hostName = onlyargs[1]
 
   if is_suse():
-    installPreReqSuse()
     installAgentSuse()
   else:
     installPreReq()
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index c730428..cbc2a50 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -3,7 +3,8 @@
         "Clusters/cluster_id",
         "Clusters/cluster_name",
         "Clusters/version",
-        "Clusters/state"
+        "Clusters/state",
+        "_"
     ],
     "Service":[
         "ServiceInfo/service_name",
@@ -12,7 +13,8 @@
         "Services/description",
         "Services/display_name",
         "Services/attributes",
-        "ServiceInfo/desired_configs"
+        "ServiceInfo/desired_configs",
+        "_"
     ],
     "Host":[
         "Hosts/cluster_name",
@@ -30,7 +32,8 @@
         "Hosts/host_status",
         "Hosts/host_health_report",
         "Hosts/public_host_name",
-        "Hosts/host_state"
+        "Hosts/host_state",
+        "_"
     ],
     "Component":[
         "ServiceComponentInfo/service_name",
@@ -39,7 +42,8 @@
         "ServiceComponentInfo/state",
         "ServiceComponents/display_name",
         "ServiceComponents/description",
-        "ServiceComponentInfo/desired_configs"
+        "ServiceComponentInfo/desired_configs",
+        "_"
     ],
     "HostComponent":[
         "HostRoles/role_id",
@@ -49,7 +53,8 @@
         "HostRoles/state",
         "HostRoles/desired_state",
         "HostRoles/configs",
-        "HostRoles/desired_configs"
+        "HostRoles/desired_configs",
+        "_"
     ],
     "Configuration":[
         "Config/tag",
@@ -59,12 +64,14 @@
     "Action":[
         "Actions/cluster_name",
         "Actions/service_name",
-        "Actions/action_name"
+        "Actions/action_name",
+        "_"
     ],
     "Request":[
         "Requests/id",
         "Requests/cluster_name",
-        "Requests/request_status"
+        "Requests/request_status",
+        "_"
     ],
     "Task":[
         "Tasks/id",
@@ -79,13 +86,15 @@
         "Tasks/stderr",
         "Tasks/stdout",
         "Tasks/start_time",
-        "Tasks/attempt_cnt"
+        "Tasks/attempt_cnt",
+        "_"
     ],
     "User":[
         "Users/user_name",
         "Users/roles",
         "Users/password",
         "Users/old_password",
-        "Users/ldap_user"
+        "Users/ldap_user",
+        "_"
     ]
 }
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
index 4a133db..31d0113 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
@@ -117,4 +117,10 @@
     <description>The hdfs path to the Hadoop streaming jar file.</description>
   </property> 
 
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Time out for templeton api</description>
+  </property>
+
 </configuration>
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
index 4a133db..31d0113 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
@@ -117,4 +117,10 @@
     <description>The hdfs path to the Hadoop streaming jar file.</description>
   </property> 
 
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Time out for templeton api</description>
+  </property>
+
 </configuration>
diff --git a/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php b/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
index 223740a..8dbffc3 100644
--- a/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
+++ b/contrib/addons/src/addOns/nagios/scripts/nagios_alerts.php
@@ -102,7 +102,7 @@
 
   /* If SUSE, status file is under /var/lib/nagios */
   if (file_exists("/etc/SuSE-release")) {
-    $status_file="/var/lib/nagios/status.dat";
+    $status_file="/var/nagios/status.dat";
   } else {
     $status_file="/var/nagios/status.dat";
   }
diff --git a/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java b/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java
index 1e66482..3563e1b 100644
--- a/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java
+++ b/contrib/ambari-log4j/src/main/java/org/apache/ambari/log4j/hadoop/mapreduce/jobhistory/MapReduceJobHistoryUpdater.java
@@ -163,24 +163,24 @@
     
     workflowUpdateNumCompletedPS =
         connection.prepareStatement(
-            "WITH sums as (SELECT sum(inputBytes) as input, " +
-                "sum(outputBytes) as output, workflowId FROM " +
-                JOB_TABLE +
-                " WHERE workflowId = (SELECT workflowId FROM " +
-                JOB_TABLE +
-                " WHERE jobId = ?) AND status = 'SUCCESS'" +
-                " GROUP BY workflowId) " +
-                "UPDATE " +
+            "UPDATE " +
                 WORKFLOW_TABLE +
                 " SET " +
                 "lastUpdateTime = ?, " +
                 "duration = ? - (SELECT startTime FROM " +
                 WORKFLOW_TABLE +
-                " WHERE workflowId = (SELECT workflowId FROM sums)), " +
-                "numJobsCompleted = numJobsCompleted + 1, " +
-                "inputBytes = (select input from sums), " +
-                "outputBytes = (select output from sums) " +
-                "WHERE workflowId = (select workflowId from sums)"
+                " WHERE workflowId = selectid), " +
+                "numJobsCompleted = rows, " +
+                "inputBytes = input, " +
+                "outputBytes = output " +
+            "FROM (SELECT count(*) as rows, sum(inputBytes) as input, " +
+                "sum(outputBytes) as output, workflowId as selectid FROM " +
+                JOB_TABLE +
+                " WHERE workflowId = (SELECT workflowId FROM " +
+                JOB_TABLE +
+                " WHERE jobId = ?) AND status = 'SUCCESS' " +
+                "GROUP BY workflowId) as jobsummary " +
+            "WHERE workflowId = selectid"
             );
     
     // JobFinishedEvent
@@ -714,9 +714,9 @@
       entityPS.setString(8, historyEvent.getJobid().toString());
       entityPS.executeUpdate();
       // job finished events always have success status
-      workflowUpdateNumCompletedPS.setString(1, historyEvent.getJobid().toString());
+      workflowUpdateNumCompletedPS.setLong(1, historyEvent.getFinishTime());
       workflowUpdateNumCompletedPS.setLong(2, historyEvent.getFinishTime());
-      workflowUpdateNumCompletedPS.setLong(3, historyEvent.getFinishTime());
+      workflowUpdateNumCompletedPS.setString(3, historyEvent.getJobid().toString());
       workflowUpdateNumCompletedPS.executeUpdate();
     } catch (SQLException sqle) {
       LOG.info("Failed to store " + historyEvent.getEventType() + " for job " + 
@@ -831,7 +831,11 @@
       entityPS.setString(3, historyEvent.getTaskStatus());
       entityPS.setLong(4, historyEvent.getFinishTime());
       entityPS.setString(5, historyEvent.getError());
-      entityPS.setString(6, historyEvent.getFailedAttemptID().toString());
+      if (historyEvent.getFailedAttemptID() != null) {
+        entityPS.setString(6, historyEvent.getFailedAttemptID().toString());
+      } else {
+        entityPS.setString(6, "task_na");
+      }
       entityPS.setString(7, historyEvent.getTaskId().toString());
       entityPS.executeUpdate();
     } catch (SQLException sqle) {