Added configuration support for snapshot scans and other misc settings

Set HDFS dfs.namenode.acls.enabled=true and added several setfacl commands
to support snapshot scans.
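
For reference, the grants amount to the following sketch (the /hbase/archive
path is the CDH default, and the trafodion user stands in for $TRAF_USER):

    # illustrative only; the installer runs these as the hdfs user
    hdfs dfs -setfacl -R -m user:trafodion:rwx /hbase/archive
    hdfs dfs -setfacl -R -m default:user:trafodion:rwx /hbase/archive
    hdfs dfs -getfacl /hbase/archive   # verify the user: and default: entries
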
Set HBase hbase.regionserver.lease.period to 10 minutes; this setting
is synonymous with hbase.client.scanner.timeout.period.
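
To spot-check the new timeout after the restart (the config path is an
assumption; CDH typically deploys client configs to /etc/hbase/conf):

    grep -A1 hbase.regionserver.lease.period /etc/hbase/conf/hbase-site.xml
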
Fixed a few temp file paths

edit 1: added mkdir and chown for the ACL directories on the build machines,
        which didn't seem to have them.
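
A quick way to confirm the directory exists with the right owner (CDH path
shown; the Hortonworks script uses /apps/hbase/data/archive instead):

    sudo su hdfs --command "hdfs dfs -ls -d /hbase/archive"
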

Change-Id: I3a00018ed3980be06b3c4886d015338d4c4cb11f
diff --git a/installer/traf_cloudera_mods98 b/installer/traf_cloudera_mods98
index 95627ba..da80a55 100755
--- a/installer/traf_cloudera_mods98
+++ b/installer/traf_cloudera_mods98
@@ -146,7 +146,11 @@
                 "name" : "secondary_namenode_java_heapsize",
         "value" : "1073741824"
                 } ]
-     } ]
+     } ],
+    "items": [ {
+             "name":"dfs_namenode_acls_enabled",
+             "value":"true"
+             } ]
 }' \
 http://$URL/api/v1/clusters/$CLUSTER_NAME/services/hdfs/config > $LOCAL_WORKDIR/traf_hdfs_config_temp
 
@@ -171,12 +175,15 @@
 rm $LOCAL_WORKDIR/traf_hdfs_config_temp 2> /dev/null
 
 # change the hbase configuration using Cloudera Manager's REST api
+# NOTE: hbase.regionserver.lease.period is used as it is equivalent to
+#       hbase.client.scanner.timeout.period and Cloudera only allows
+#       hbase.regionserver.lease.period to be set through the REST API.
 curl -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
 '{ "roleTypeConfigs" :	[ {
 	"roleType" : "MASTER",
 	"items" : [ { 
 		"name" : "hbase_master_config_safety_valve", 
-        "value" : "<property>\r\n   <name>hbase.master.distributed.log.splitting</name>\r\n   <value>false</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.ma    ster.timeoutMillis</name>\r\n   <value>600000</value>\r\n</property>\r\n"
+        "value" : "<property>\r\n   <name>hbase.master.distributed.log.splitting</name>\r\n   <value>false</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.master.timeoutMillis</name>\r\n   <value>600000</value>\r\n</property>\r\n"
 		} ]
     }, {
 	"roleType" : "REGIONSERVER", 
@@ -184,6 +191,9 @@
 		"name" : "hbase_coprocessor_region_classes", 
                 "value" : "org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
 		}, {
+		"name" : "hbase_regionserver_lease_period", 
+		"value" : "600000"
+		}, {
 		"name" : "hbase_regionserver_config_safety_valve", 
 		"value" : "<property>\r\n   <name>hbase.hregion.impl</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.split.policy</name>\r\n   <value>org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy</value>\r\n</property>\r\n  <property>\r\n   <name>hbase.snapshot.enabled</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.bulkload.staging.dir</name>\r\n   <value>/hbase-staging</value>\r\n</property>\r\n <property>\r\n   <name>hbase.regionserver.region.transactional.tlog</name>\r\n   <value>true</value>\r\n</property>\r\n <property>\r\n   <name>hbase.snapshot.region.timeout</name>\r\n   <value>600000</value>\r\n</property>\r\n "
 		} ] 
@@ -203,14 +213,14 @@
 if [ $curl_error -ne 0 ]; then
     echo "***ERROR: Unable to modify HBase configuration through Cloudera's REST API."
     echo "***ERROR: Check that HBase is running without error."
-    cat traf_hbase_config_temp
+    cat $LOCAL_WORKDIR/traf_hbase_config_temp
     exit -1
 fi
 curl_error=$(grep message $LOCAL_WORKDIR/traf_hbase_config_temp | wc -l)
 if [ $curl_error -ne 0 ]; then
     echo "***ERROR: Unable to modify HBase configuration through Cloudera's REST API."
     echo "***ERROR: Check that HBase is running without error."
-    cat traf_hbase_config_temp
+    cat $LOCAL_WORKDIR/traf_hbase_config_temp
     exit -1
 fi
 rm $LOCAL_WORKDIR/traf_hbase_config_temp 2> /dev/null
@@ -248,3 +258,26 @@
 
 echo "***INFO: HBase restart completed successfully"
 
+#====================================================
+# NOTE: These commands must be done AFTER ACLs are
+#       enabled and HDFS has been restarted
+echo "***INFO: Setting HDFS ACLs for snapshot scan support"
+sudo su hdfs --command "hdfs dfs -mkdir -p /hbase/archive"
+sudo su hdfs --command "hdfs dfs chown hbase:hbase /hbase/archive"
+sudo su hdfs --command "hdfs dfs -setfacl -R -m user:$TRAF_USER:rwx /hbase/archive"
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m user:$TRAF_USER:rwx /hbase/archive) command failed"
+   exit -1
+fi
+sudo su hdfs --command "hdfs dfs -setfacl -R -m default:user:$TRAF_USER:rwx /hbase/archive"
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m default:user:$TRAF_USER:rwx /hbase/archive) command failed"
+   exit -1
+fi
+sudo su hdfs --command "hdfs dfs -setfacl -R -m mask::rwx /hbase/archive"
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /hbase/archive) command failed"
+   exit -1
+fi
+
+
diff --git a/installer/traf_hortonworks_mods98 b/installer/traf_hortonworks_mods98
index 68e223a..394061e 100755
--- a/installer/traf_hortonworks_mods98
+++ b/installer/traf_hortonworks_mods98
@@ -183,6 +183,18 @@
     exit -1
 fi
 sleep 2
+$AMBARI_DIR/configs.sh -port $PORT set $AMBARI_HOST $CLUSTER_NAME hdfs-site dfs.namenode.acls.enabled true
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify dfs.namenode.acls.enabled through Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+$AMBARI_DIR/configs.sh -port $PORT set $AMBARI_HOST $CLUSTER_NAME hbase-site hbase.client.scanner.timeout.period 600000
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.client.scanner.timeout.period through Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
 echo
 
 #=====================================
@@ -250,7 +262,7 @@
 command_id=$(cat $TRAF_WORKDIR/traf_hbase_restart_temp | grep id | awk '{print $3}' | sed -e 's@,@@' )
 echo "***DEBUG: Ambari command_id=$command_id"
 
-# poll until stop is completed as a stop can take a while
+# poll until start is completed as a start can take a while
 completed=0
 while [ $completed -eq 0 ]; do
     sleep $poll_time
@@ -263,6 +275,106 @@
     completed=$(cat $LOCAL_WORKDIR/hbase_restart_status_temp | grep '"request_status" : "COMPLETED"' | wc -l)
 done
 
+#=====================================
+# restart HDFS to pick up all the changes just made
+
+echo "***INFO: Restarting HDFS to pick up config changes for Trafodion"
+echo "***INFO: Stopping HDFS..."
+curl --user $ADMIN:$PASSWORD \
+    -H "X-Requested-By: Trafodion" \
+    -X PUT -d '{"ServiceInfo": { "state" : "INSTALLED" }}' \
+    http://$URL/api/v1/clusters/$CLUSTER_NAME/services/HDFS > $TRAF_WORKDIR/traf_hdfs_restart_temp
+
+if [ $? != 0 ]; then 
+   echo "***ERROR: Unable to restart HDFS"
+   echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi 
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $TRAF_WORKDIR/traf_hdfs_restart_temp | grep Error | wc -l)
+
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to restart HDFS"
+    echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi
+
+echo "***INFO: ...polling every $poll_time seconds until restart is completed."
+command_id=$(cat $TRAF_WORKDIR/traf_hdfs_restart_temp | grep id | awk '{print $3}' | sed -e 's@,@@' )
+echo "***DEBUG: Ambari command_id=$command_id"
+
+# poll until stop is completed as a stop can take a while
+completed=0
+while [ $completed -eq 0 ]; do
+    sleep $poll_time
+    curl --user $ADMIN:$PASSWORD \
+        http://$URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
+        > $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    cat $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    echo "***INFO: ...polling every $poll_time seconds until stop is completed."
+    # if stop command is completed then completed will not equal 0
+    completed=$(cat $LOCAL_WORKDIR/traf_hdfs_restart_temp | grep '"request_status" : "COMPLETED"' | wc -l)
+done
+
+echo "***INFO: Starting HDFS..."
+curl --user $ADMIN:$PASSWORD \
+    -H "X-Requested-By: Trafodion" \
+    -X PUT -d '{"ServiceInfo": { "state" : "STARTED" }}' \
+    http://$URL/api/v1/clusters/$CLUSTER_NAME/services/HDFS > $TRAF_WORKDIR/traf_hdfs_restart_temp
+
+if [ $? != 0 ]; then
+   echo "***ERROR: Unable to restart HDFS"
+   echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $TRAF_WORKDIR/traf_hdfs_restart_temp | grep Error | wc -l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to restart HDFS"
+    echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi
+
+echo "***INFO: ...polling every $poll_time seconds until start is completed."
+command_id=$(cat $TRAF_WORKDIR/traf_hdfs_restart_temp | grep id | awk '{print $3}' | sed -e 's@,@@' )
+echo "***DEBUG: Ambari command_id=$command_id"
+
+# poll until start is completed as a start can take a while
+completed=0
+while [ $completed -eq 0 ]; do
+    sleep $poll_time
+    curl --user $ADMIN:$PASSWORD \
+        http://$URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
+        > $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    cat $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    echo "***INFO: ...polling every $poll_time seconds until stop is completed."
+    # if start command is completed then completed will not equal 0
+    completed=$(cat $LOCAL_WORKDIR/traf_hdfs_restart_temp | grep '"request_status" : "COMPLETED"' | wc -l)
+done
+
+#=====================================
+# NOTE: These commands must be done AFTER ACLs are
+#       enabled and HDFS has been restarted
+echo "***INFO: Setting HDFS ACLs for snapshot scan support"
+sudo su hdfs --command "hdfs dfs -mkdir -p /apps/hbase/data/archive"
+sudo su hdfs --command "hdfs dfs chown hbase:hdfs apps/hbase/data/archive"
+sudo su hdfs --command "hdfs dfs -setfacl -R -m user:$TRAF_USER:rwx /apps/hbase/data/archive"
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
+   exit -1
+fi
+sudo su hdfs --command "hdfs dfs -setfacl -R -m default:user:$TRAF_USER:rwx /apps/hbase/data/archive"
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
+   exit -1
+fi
+sudo su hdfs --command "hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive"
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
+   exit -1
+fi
+
+
 # clean up files generated by Ambari's config.sh script
 rm $LOCAL_WORKDIR/doSet_version*