#!/bin/bash
# @@@ START COPYRIGHT @@@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @@@ END COPYRIGHT @@@
#
##############################################################################
##
## This sets up a Hadoop/Hive/HBase environment to be used with a Trafodion
## workstation development environment. This script is meant for developers
## and has the following characteristics:
##
## - Creates a pseudo-distributed single node installation
## - Sandboxes the installation into a single directory,
## $MY_SQROOT/sql/local_hadoop
## - Creates some convenience shell scripts in $MY_SQROOT/sql/scripts
## to start and stop Hadoop and for some interactive shells
## - Does not require sudo privileges for installing or running Hadoop
## - Can run on non-standard ports, if needed, to be able to run multiple
## Hadoop/Hive/HBase instances on the same machine
## - Uses a file system directory to store HBase data, not HDFS
## - Uses MySQL as the Hive metastore
## - Creates a TPC-DS sample database in Hive
##
##############################################################################
# Environment variables - optional
#
# MY_LOCAL_SW_DIST - shared location on the local network for tarballs of Hadoop, etc.
# - Hadoop
# - HBase
# - Hive
# - MySQL (used as Hive metastore DB)
# - MySQL connector for Java (used by Hive to access metastore)
# - TPC-DS from tpc.org (for Hive sample DB)
# also set http_proxy and ftp_proxy if necessary, to download
# files from repositories on the Internet
#
# DCS to use. This list is in order of precedence.
# DCS_TAR - Optionally specify a local tar file to use
# DCS_URL - Optionally specify a URL to download
# DCS_SRC - Optionally specify a local source tree to use
# If none is specified, the latest code is downloaded from GitHub
#
# Trafodion REST to use. This list is in order of precedence.
# REST_TAR - Optionally specify a local tar file to use
# REST_URL - Optionally specify a URL to download
# REST_SRC - Optionally specify a local source tree to use
# If none is specified, the latest code is downloaded from GitHub
#
# phoenix_test
# PHX_SRC - Optionally specify a local source tree to use
# If not specified, the latest code is downloaded from GitHub
##############################################################################
function usage {
cat <<EOF
Usage:
$MY_CMD [ -p {<start port num> | rand | fromDisplay} ]
[ -y ]
[ -n ]
[ -v ]
-p configures non-standard ports, and is one of:
-p <start port num> use a custom starting port number
-p rand for shared systems, use a random start port number
between 9000 and 49000 that is divisible by 200
-p fromDisplay derive the starting port number from the X display
number (useful if you are running in a VNC session)
-y answers interactive questions implicitly with yes
-n takes no action (dry run), useful with -v to list the ports
-v lists the port values used
See script header for use of optional environment variables.
EOF
}
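# Example invocations (illustrative only):
#   ./install_local_hadoop -p fromDisplay   # derive the port range from the VNC display number
#   ./install_local_hadoop -p rand -y       # random port range, answer all questions with yes
#   ./install_local_hadoop -n -v            # only list the port numbers that would be used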
function check_ssh {
SSH_FAILED=no
echo
echo "Check ssh ${MY_HOST_1} access without a password..."
# disable any options that would query the user terminal, ask for strict host key checking
# to force it to fail if something is not quite right with the host key
ssh -o PasswordAuthentication=no -o KbdInteractiveDevices=none -o StrictHostKeyChecking=yes ${MY_HOST_1} echo "testing ssh ${MY_HOST_1}"
if [ $? -ne 0 ]; then
# if we deal with multiple systems, don't wipe out .ssh directory
# try some simpler measures and give up if those don't work
echo "Problems encountered with ssh, trying to fix some common issues..."
if [ -d ~/.ssh ]; then
grep -q NoHostAuthenticationForLocalhost ~/.ssh/config
if [ $? -ne 0 ]; then
# configure ssh to omit host check for localhost
grep -qi ^host ~/.ssh/config
if [ $? -eq 0 ]; then
# make sure this applies to all hosts, even if
# there are host directives in the file
echo "host *" >>~/.ssh/config
fi
echo "NoHostAuthenticationForLocalhost=yes" >>~/.ssh/config
chmod go-w ~/.ssh/config
fi
if [ ${MY_HOST_1} != "localhost" ]; then
# remove existing entries for the host from known_hosts
if [ -f ~/.ssh/known_hosts ]; then
ed ~/.ssh/known_hosts <<EOF
g/^${MY_HOST_1}/d
w
EOF
fi
# add an entry for our host to known_hosts
echo "${MY_HOST_1}" `cat /etc/ssh/ssh_host_rsa_key.pub` >>~/.ssh/known_hosts
chmod 644 ~/.ssh/known_hosts
fi
# check whether doing ssh-add will fix the problem
ps -aef | grep ${USER} | grep ssh-agent | grep -v grep >/dev/null
if [ $? -eq 0 ]; then
# ssh-agent is running, tell it to use the new key
ssh-add
fi
# now try once more (with regular host key checking)
ssh -o PasswordAuthentication=no -o KbdInteractiveDevices=none ${MY_HOST_1} echo "testing ssh ${MY_HOST_1}"
if [ $? -ne 0 ]; then
SSH_FAILED=yes
fi
else
SSH_FAILED=yes
fi
fi
if [ $SSH_FAILED = yes ]; then
# A few initial steps that are required:
cat <<EOF
Please make sure you can do ssh ${MY_HOST_1} without having to enter a password
(this is a one-time setup):
cd
rm -rf .ssh
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 644 ~/.ssh/authorized_keys
ssh-add
EOF
if [ -z "$MY_IMPLICIT_Y" ]; then
echo "Would you like to configure ssh to ${MY_HOST_1}? This will wipe out your existing"
echo "~/.ssh directory and you will lose existing private key files."
echo " "
echo -n "Enter y/n (n): "
read YN
else
# user already enabled on command line
YN=$MY_IMPLICIT_Y
fi
if [ "$YN" = "y" -o "$YN" = "Y" ]; then
echo "Setting up public/private key pair for connection to ${MY_HOST_1}..."
echo "Saving the old ~/.ssh directory to ~/.ssh-renamed, in case this is not what you wanted"
cd
rm -rf .ssh-renamed
mv -f .ssh .ssh-renamed
# generate an RSA public/private key pair without a passphrase
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
# copy public key to authorized_keys and set permissions correctly
chmod 600 ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 644 ~/.ssh/authorized_keys
# avoid the interactive question to authenticate localhost
# as a valid host (this might cause problems when running
# this script in batch mode)
echo "${MY_HOST_1}" `cat /etc/ssh/ssh_host_rsa_key.pub` >~/.ssh/known_hosts
chmod 644 ~/.ssh/known_hosts
# disable checks for "localhost", so we can use this on multiple machines
echo "NoHostAuthenticationForLocalhost=yes" >>~/.ssh/config
chmod 644 ~/.ssh/config
ps -aef | grep ${USER} | grep ssh-agent | grep -v grep >/dev/null
if [ $? -eq 0 ]; then
# ssh-agent is running, tell it to use the new key
ssh-add
fi
# now try once more (with regular host key checking)
ssh -o PasswordAuthentication=no -o KbdInteractiveDevices=none ${MY_HOST_1} echo "testing ssh ${MY_HOST_1}"
if [ $? -ne 0 ]; then
SSH_FAILED=yes
fi
else
echo "================================================="
echo "exiting, set up ssh access on your own,"
echo "remove ${MY_SW_ROOT} and rerun this"
echo "installation script, $0"
echo "================================================="
exit 1
fi
else
echo "Check ssh ${MY_HOST_1} access without a password succeeded."
fi
# end of ssh checking
}
function listports {
# Report values selected for ports
VARS_FOR_PORTS="
MY_DCS_MASTER_INFO_PORT
MY_DCS_MASTER_PORT
MY_DCS_SERVER_INFO_PORT
MY_HADOOP_DN_HTTP_PORT_NUM
MY_HADOOP_DN_IPC_PORT_NUM
MY_HADOOP_DN_PORT_NUM
MY_HADOOP_HDFS_PORT_NUM
MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM
MY_HADOOP_NN_HTTP_PORT_NUM
MY_HADOOP_SECONDARY_NN_PORT_NUM
MY_HADOOP_SHUFFLE_PORT_NUM
MY_HADOOP_TASK_TRACKER_PORT_NUM
MY_HBASE_MASTER_INFO_PORT_NUM
MY_HBASE_MASTER_PORT_NUM
MY_HBASE_REGIONSERVER_INFO_PORT_NUM
MY_HBASE_REGIONSERVER_PORT_NUM
MY_HBASE_REST_PORT_NUM
MY_HBASE_ZOOKEEPER_LEADERPORT_NUM
MY_HBASE_ZOOKEEPER_PEERPORT_NUM
MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM
MY_REST_SERVER_PORT
MY_REST_SERVER_SECURE_PORT
MY_SQL_PORT_NUM
MY_YARN_ADMIN_PORT_NUM
MY_YARN_HTTP_PORT_NUM
MY_YARN_LOCALIZER_PORT_NUM
MY_YARN_NM_PORT_NUM
MY_YARN_RESMAN_PORT_NUM
MY_YARN_SCHED_PORT_NUM
MY_YARN_TRACKER_PORT_NUM"
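# expand each variable name indirectly and print it as NAME=VALUE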
for AVAR in $VARS_FOR_PORTS; do
AVALUE="$(eval "echo \$$AVAR")"
printf '%s=%s\n' $AVAR $AVALUE
done
echo
}
# global config
# directory on local disk that belongs to the current user
MY_SW_PARENT=$MY_SQROOT/sql
# name of subdirectory to install Hadoop/Hive/MySQL in
MY_SW_ROOT=$MY_SW_PARENT/local_hadoop
# directory where convenience scripts will be located
MY_SW_SCRIPTS_DIR=$MY_SW_PARENT/scripts
MYSQL_HOME=${MY_SW_ROOT}/mysql
YARN_HOME=${MY_SW_ROOT}/hadoop
HIVE_HOME=${MY_SW_ROOT}/hive
HBASE_HOME=${MY_SW_ROOT}/hbase
# shared location on the local network for tarballs of Hadoop, etc.
# - Hadoop
# - HBase
# - Hive
# - MySQL (used as Hive metastore DB)
# - MySQL connector for Java (used by Hive to access metastore)
# - TPC-DS from tpc.org (for Hive sample DB)
# ------------- please customize this line ----------------------
if [ -z "$MY_LOCAL_SW_DIST" ]; then
MY_LOCAL_SW_DIST=/add_your_local_shared_folder_here
fi
# also set http_proxy and ftp_proxy if necessary, to download
# files from repositories on the Internet
# hosts, only localhost or the actual DNS name of the local host are supported for now
############################################
MY_HOST_1=localhost
# this may get changed below to a DNS name, with the -d command line option
# multiple hosts are not yet supported
MY_LOG_FILE=${MY_SW_ROOT}/log/install_local_hadoop_$(date +%F_%T).log
# locations for storing data and metadata
##########################################
MY_DATA_DIR=${MY_SW_ROOT}/data
MY_SQL_DATA_DIR=${MY_DATA_DIR}/mysql
MY_DERBY_DATA_DIR=${MY_DATA_DIR}/derby
MY_HADOOP_DATA_DIR=${MY_DATA_DIR}/hadoop
MY_HIVE_DATA_DIR=${MY_DATA_DIR}/hive
MY_HBASE_DATA_DIR=${MY_DATA_DIR}/hbase
# Administrator and Hive user name for MySQL installation
MY_SQL_ADMIN=root
MY_SQL_USER=$MY_SQL_ADMIN
# generate a random password to use for MySQL (no spaces or special characters)
MY_SQL_ADMIN_PASSWD=p${RANDOM}${RANDOM}
MY_SQL_USER_PASSWD=$MY_SQL_ADMIN_PASSWD
# database name for Hive metastore
MY_SQL_METASTORE_DB=metastore
# MySQL configuration file
MY_SQL_CONFIG_FILE=${MYSQL_HOME}/my.cnf
MY_CMD=$0
MY_IMPLICIT_Y=
NOACTION=
VERBOSE=
MISSING_P_VAL="ERROR: -p option should be followed by <start port num>, rand or fromDisplay"
# process command line arguments
################################
while [ $# -gt 0 ];
do
case $1 in
-p) shift
if [[ -z "$1" ]]; then
echo "$MISSING_P_VAL"
exit 1
fi
MY_START_PORT=$1
if [ $MY_START_PORT != "rand" -a $MY_START_PORT != "fromDisplay" ]; then
test $MY_START_PORT -gt 0 >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "$MISSING_P_VAL"
exit 1
fi
fi
;;
-y) MY_IMPLICIT_Y="y"
;;
-d) # use DNS name instead of "localhost" (option is not currently recommended)
MY_HOST_1=`uname -a | cut -f 2 -d ' '`
;;
-h|-help)
usage
exit 0
;;
-n) NOACTION="y"
;;
-v) VERBOSE="y"
;;
*) echo "ERROR: Unexpected argument $1"
echo
cat <<EOF
Syntax: $0 [ -p [<starting port num> | rand | fromDisplay ] ] [-y] [-n] [-v]
EOF
exit 1
;;
esac
shift
done
# port numbers used
####################
# From http://blog.cloudera.com/blog/2009/08/hadoop-default-ports-quick-reference/
# and http://hbase.apache.org/book/config.files.html
USE_DEFAULT_PORTS=no
# To be done later, use separate ports to allow multiple
# Hadoop instances on the same Linux node
if [ -z "$MY_START_PORT" ]; then
USE_DEFAULT_PORTS=yes
MY_START_PORT=50000
else
if [ $MY_START_PORT == "fromDisplay" ]; then
# display :34.0 would result in starting port 53400 (instead of default 50000 range)
MY_START_PORT=${DISPLAY/*:/}
MY_START_PORT=${MY_START_PORT/.*/}
MY_START_PORT=`expr 50000 + $MY_START_PORT '*' 100`
elif [ $MY_START_PORT == "rand" ]; then
# pick a random number between 9000 and 49000 that is divisible by 200
MY_START_PORT=`expr $RANDOM '%' 200 '*' 200 + 9000`
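# expr applies % and * left to right, so this is ((RANDOM % 200) * 200) + 9000,
# i.e. a multiple of 200 between 9000 and 48800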
fi
echo "# Using non-standard port range from MY_START_PORT env var: $MY_START_PORT..."
fi
# assign ports with defaults outside the range of 50000-50199
if [ $USE_DEFAULT_PORTS == 'yes' ]; then
# fs.default.name
MY_HADOOP_HDFS_PORT_NUM=9000
# mapreduce.shuffle.port
MY_HADOOP_SHUFFLE_PORT_NUM=8080
# yarn.resourcemanager.address
MY_YARN_RESMAN_PORT_NUM=8032
# yarn.resourcemanager.scheduler.address
MY_YARN_SCHED_PORT_NUM=8030
# yarn.resourcemanager.webapp.address
MY_YARN_HTTP_PORT_NUM=8088
# yarn.resourcemanager.resource-tracker.address
MY_YARN_TRACKER_PORT_NUM=8031
# yarn.resourcemanager.admin.address
MY_YARN_ADMIN_PORT_NUM=8033
# yarn.nodemanager.localizer.address
MY_YARN_LOCALIZER_PORT_NUM=8040
# yarn.nodemanager.webapp.address
MY_YARN_NM_PORT_NUM=8041
# unique port # for MySQL (don't use the default of 3306, which is often already in use)
MY_SQL_PORT_NUM=3346
# hbase.master.port
MY_HBASE_MASTER_PORT_NUM=60000
# hbase.master.info.port
MY_HBASE_MASTER_INFO_PORT_NUM=60010
# hbase.regionserver.port
MY_HBASE_REGIONSERVER_PORT_NUM=60020
# hbase.regionserver.info.port
MY_HBASE_REGIONSERVER_INFO_PORT_NUM=60030
# hbase.zookeeper.peerport
MY_HBASE_ZOOKEEPER_PEERPORT_NUM=2888
# hbase.zookeeper.leaderport
MY_HBASE_ZOOKEEPER_LEADERPORT_NUM=3888
# hbase.zookeeper.property.clientPort
MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM=2181
# hbase.rest.port
MY_HBASE_REST_PORT_NUM=8080
# dcs.master.port (range of port numbers, one per server)
MY_DCS_MASTER_PORT=23400
# dcs.master.info.port
MY_DCS_MASTER_INFO_PORT=24400
# dcs.server.info.port
MY_DCS_SERVER_INFO_PORT=24410
# trafodion.rest.port
MY_REST_SERVER_PORT=4200
# trafodion.rest.https.port
MY_REST_SERVER_SECURE_PORT=4201
else
# fs.default.name
MY_HADOOP_HDFS_PORT_NUM=$MY_START_PORT
MY_HADOOP_SHUFFLE_PORT_NUM=`expr $MY_START_PORT + 62`
MY_YARN_RESMAN_PORT_NUM=`expr $MY_START_PORT + 132`
MY_YARN_SCHED_PORT_NUM=`expr $MY_START_PORT + 130`
MY_YARN_HTTP_PORT_NUM=`expr $MY_START_PORT + 188`
MY_YARN_TRACKER_PORT_NUM=`expr $MY_START_PORT + 131`
MY_YARN_ADMIN_PORT_NUM=`expr $MY_START_PORT + 133`
MY_YARN_LOCALIZER_PORT_NUM=`expr $MY_START_PORT + 140`
MY_YARN_NM_PORT_NUM=`expr $MY_START_PORT + 141`
MY_HBASE_MASTER_PORT_NUM=`expr $MY_START_PORT + 160`
MY_HBASE_MASTER_INFO_PORT_NUM=`expr $MY_START_PORT + 161`
MY_HBASE_REGIONSERVER_PORT_NUM=`expr $MY_START_PORT + 162`
MY_HBASE_REGIONSERVER_INFO_PORT_NUM=`expr $MY_START_PORT + 163`
MY_HBASE_ZOOKEEPER_PEERPORT_NUM=`expr $MY_START_PORT + 167`
MY_HBASE_ZOOKEEPER_LEADERPORT_NUM=`expr $MY_START_PORT + 168`
MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM=`expr $MY_START_PORT + 170`
MY_HBASE_REST_PORT_NUM=`expr $MY_START_PORT + 171`
# unique port # for MySQL (default is 3306)
MY_SQL_PORT_NUM=`expr $MY_START_PORT + 46`
# MY_DCS_MASTER_PORT is a range of ports, one per server
# (see "server" file written below for how many are configured)
MY_DCS_MASTER_PORT=`expr $MY_START_PORT + 172`
MY_DCS_MASTER_INFO_PORT=`expr $MY_START_PORT + 181`
MY_DCS_SERVER_INFO_PORT=`expr $MY_START_PORT + 182`
MY_REST_SERVER_PORT=`expr $MY_START_PORT + 183`
MY_REST_SERVER_SECURE_PORT=`expr $MY_START_PORT + 184`
fi
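# Worked example: with -p fromDisplay and DISPLAY=:34.0, MY_START_PORT is 53400,
# so the HBase master info port becomes 53400+161=53561 and the first DCS master
# port 53400+172=53572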
# handle ports in the range of 50000 to 50199
# in hdfs-site.xml (setting any of these to 0 means start on a free port):
# dfs.http.address 50070 dfs namenode web ui
MY_HADOOP_NN_HTTP_PORT_NUM=`expr $MY_START_PORT + 70`
# dfs.secondary.http.address 50090 The secondary namenode http server
MY_HADOOP_SECONDARY_NN_PORT_NUM=`expr $MY_START_PORT + 90`
# dfs.datanode.address 50010 datanode server
MY_HADOOP_DN_PORT_NUM=`expr $MY_START_PORT + 10`
# dfs.datanode.http.address 50075 datanode http server
MY_HADOOP_DN_HTTP_PORT_NUM=`expr $MY_START_PORT + 75`
# dfs.datanode.ipc.address 50020 datanode ipc server
MY_HADOOP_DN_IPC_PORT_NUM=`expr $MY_START_PORT + 20`
# in mapred-site.xml:
# mapred.job.tracker.http.address (default 50030)
MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM=`expr $MY_START_PORT + 30`
# mapred.task.tracker.http.address (default 50060)
MY_HADOOP_TASK_TRACKER_PORT_NUM=`expr $MY_START_PORT + 60`
# others, left as default for now:
# dfs.backup.address (50100)
# dfs.backup.http.address (50105)
# mapred.job.tracker (???)
if [[ -n "$VERBOSE" ]]; then
listports
fi
if [[ -n "$NOACTION" ]]; then
exit 0
fi
# Specify mirrors and versions of needed components
#####################################################
#HADOOP_MIRROR_URL=http://archive.cloudera.com/cdh5/cdh/5
#HADOOP_TAR=hadoop-2.5.0-cdh5.3.0.tar.gz
HADOOP_MIRROR_URL=http://archive.apache.org/dist/hadoop/core/hadoop-2.4.0
HADOOP_TAR=hadoop-2.4.0.tar.gz
if [[ "$SQ_HBASE_DISTRO" = "HDP" ]]; then
HADOOP_TAR=hadoop-2.6.0.2.2.0.0-2041.tar.gz
fi
# Alternative: Use MariaDB (not validated)
# MARIADB_MIRROR_URL=https://downloads.mariadb.org/f/mariadb-5.5.29/kvm-bintar-hardy-amd64/mariadb-5.5.29-linux-x86_64.tar.gz/from/http:/ftp.osuosl.org/pub/mariadb
# MARIADB_TAR=mariadb-5.5.29-linux-x86_64.tar.gz
MYSQL_MIRROR_URL=http://cdn.mysql.com/archives/mysql-5.6
MYSQL_TAR=mysql-5.6.10-linux-glibc2.5-x86_64.tar.gz
MYSQL_JDBC_URL=http://cdn.mysql.com/archives/mysql-connector-java-5.1
MYSQL_JDBC_TAR=mysql-connector-java-5.1.23.tar.gz
HIVE_MIRROR_URL=https://archive.apache.org/dist/hive/hive-0.13.1
HIVE_PREFIX=apache-hive-0.13.1-bin
HIVE_TAR=${HIVE_PREFIX}.tar.gz
#HBASE_MIRROR_URL=http://psg.mtu.edu/pub/apache/hbase/hbase-0.98.3
#HBASE_MIRROR_URL=http://archive.cloudera.com/cdh5/cdh/5
HBASE_MIRROR_URL=http://archive.apache.org/dist/hbase/hbase-0.98.10/
#HBASE_TAR=hbase-0.98.6-cdh5.3.0.tar.gz
HBASE_TAR=hbase-0.98.10-hadoop2-bin.tar.gz
#HBASE_TAR=hbase-0.98.4-hadoop2-bin.tar.gz
if [[ "$SQ_HBASE_DISTRO" = "HDP" ]]; then
HBASE_TAR=hbase-0.98.4.2.2.0.0-2041-hadoop2.tar.gz
fi
echo "Checking for existing Hadoop processes..."
if [ `netstat -anl | grep ${MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM} | grep LISTEN | wc -l` -gt 0 -o \
`netstat -anl | grep ${MY_HADOOP_NN_HTTP_PORT_NUM} | grep LISTEN | wc -l` -gt 0 ]; then
echo '**** ERROR:'
echo "A process is already listening to port ${MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM} or ${MY_HADOOP_NN_HTTP_PORT_NUM}."
echo "This could be your own HDFS web interface or that of someone else."
echo "Please shut Hadoop down first or switch to another machine."
if [ -n "$DISPLAY" ]; then
echo "Alternatively, use non-standard ports with this option:"
echo "$MY_CMD -p fromDisplay"
fi
exit 1
fi
# check for missing tpcds_kit.zip file
install_hadoop_regr_test_env --check
if [ $? -ne 0 ]; then
exit 1
fi
if [ -d "$MY_SW_ROOT" ]; then
echo "$MY_SW_ROOT already exists, skipping initial steps..."
else
echo
echo "Creating common directory $MY_SW_ROOT..."
#####################################################
mkdir $MY_SW_ROOT
cd $MY_SW_ROOT
mkdir log
echo
echo "Checking Java version..."
# check JAVA_HOME and Java version (1.6 or 1.7)
if [ -n "$JAVA_HOME" ]; then
if [ `expr "${JAVA_HOME}" : ".*_32"` -gt 0 ]; then
echo "Using a 32 bit Java environment, JAVA_HOME=${JAVA_HOME}"
echo -n "This might not be optimal. Ok to switch to a 64 bit Java environment? Enter y/n (n):"
read YN
if [ "$YN" == "y" -o "$YN" == "Y" ]; then
unset JAVA_HOME
else
echo "Ok, continuing with 32 bit Java..."
fi
fi
if [ -n "$JAVA_HOME" ]; then
echo "Picked up JAVA_HOME=${JAVA_HOME} from the environment..."
fi
fi
if [ -z "$JAVA_HOME" ]; then
echo "Trying to determine JAVA_HOME..."
JAVA_HOME_CANDIDATES="\
/opt/home/tools/jdk1.6.*_64 \
/opt/home/tools/jdk1.7.*_64"
# Add the directory of the java executable in the path to the candidates
JAVA_EXE=`which java`
if [ $? -eq 0 ]; then
# resolve symbolic links until we reach the actual file
# (readlink -f also canonicalizes relative symlink targets)
JAVA_EXE=`readlink -f $JAVA_EXE`
JAVA_DIR=`dirname $JAVA_EXE`
JAVA_HOME_CANDIDATES="$JAVA_HOME_CANDIDATES $JAVA_DIR"
fi
cd $MY_SW_ROOT
# loop through candidates, use the last one that has a
# java executable in it (preference for later versions and path)
for c in $JAVA_HOME_CANDIDATES
do
if [ -x $c/bin/java ]; then
JAVA_HOME=$c
fi
done
echo "Setting JAVA_HOME=${JAVA_HOME}"
fi
JAVA_EXE=$JAVA_HOME/bin/java
if [ -z "$JAVA_HOME" -o \( ! -x $JAVA_EXE \) ]; then
echo '**** ERROR:'
echo "JAVA_HOME environment variable $JAVA_HOME doesn't point to a java executable, exiting..."
exit 1
fi
$JAVA_EXE -version
(${JAVA_EXE} -version 2>&1) | grep '1.6' >/dev/null
if [ $? -ne 0 ]; then
(${JAVA_EXE} -version 2>&1) | grep '1.7' >/dev/null
if [ $? -ne 0 ]; then
echo '**** ERROR:'
cat <<EOF
Please make sure you are using a Java 1.6 or 1.7 SDK.
Otherwise, download one into ${MY_SW_ROOT}, extract it,
create a symbolic link ${MY_SW_ROOT}/java that points to it, and
export JAVA_HOME=${MY_SW_ROOT}/java
EOF
echo exiting...
exit 1
else
echo "Java version 1.7 is ok"
fi
else
echo "Java version 1.6 is ok"
fi
# end of check Java version
echo
echo "Creating some convenience shell scripts in $MY_SW_SCRIPTS_DIR..."
if [ ! -d $MY_SW_SCRIPTS_DIR ]; then
mkdir $MY_SW_SCRIPTS_DIR
fi
# shell script to set up common environment variables
cat <<EOF >$MY_SW_SCRIPTS_DIR/sw_env.sh
# Basic environment variables for Trafodion/Hadoop/Hive/HBase/MySQL setup
export JAVA_HOME=${JAVA_HOME}
export MYSQL_HOME=${MYSQL_HOME}
export YARN_HOME=${YARN_HOME}
export HIVE_HOME=${HIVE_HOME}
export HBASE_HOME=${HBASE_HOME}
export MY_HADOOP_HDFS_PORT_NUM=${MY_HADOOP_HDFS_PORT_NUM}
EOF
# now source in this script
. $MY_SW_SCRIPTS_DIR/sw_env.sh
####################################
# scripts to start/stop environment
####################################
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstartall
#!/bin/sh
echo "Starting Hadoop, MySQL, HBase..."
cd ${MY_SW_SCRIPTS_DIR}
./swstarthadoop
./swstartmysql
./swstarthbase
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstopall
#!/bin/sh
echo "Stopping Hadoop, MySQL, HBase..."
cd ${MY_SW_SCRIPTS_DIR}
./swstophbase
./swstophadoop
./swstopmysql
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstarthadoop
#!/bin/sh
echo "Starting Hadoop..."
cd ${MY_SW_ROOT}
. $MY_SW_SCRIPTS_DIR/sw_env.sh
./hadoop/sbin/start-dfs.sh
./hadoop/sbin/start-yarn.sh
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstophadoop
#!/bin/sh
echo "Stopping Hadoop..."
cd ${MY_SW_ROOT}
. $MY_SW_SCRIPTS_DIR/sw_env.sh
./hadoop/sbin/stop-yarn.sh
./hadoop/sbin/stop-dfs.sh
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstartmysql
#!/bin/sh
echo "Starting mysqld..."
cd ${MY_SW_ROOT}/mysql
. $MY_SW_SCRIPTS_DIR/sw_env.sh
./bin/mysqld_safe --defaults-file=${MY_SQL_CONFIG_FILE} --log-error=${MY_SW_ROOT}/log/mysqld_safe.\$HOSTNAME.log &
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstopmysql
#!/bin/sh
echo "Stopping mysqld..."
cd ${MY_SW_ROOT}/mysql
. $MY_SW_SCRIPTS_DIR/sw_env.sh
./bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} shutdown
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstarthbase
#!/bin/sh
echo "Starting HBase..."
cd ${MY_SW_ROOT}
. $MY_SW_SCRIPTS_DIR/sw_env.sh
./hbase/bin/start-hbase.sh
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstophbase
#!/bin/sh
cd ${MY_SW_ROOT}
. $MY_SW_SCRIPTS_DIR/sw_env.sh
./hbase/bin/stop-hbase.sh
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swstatus
#!/bin/sh
cd ${MY_SW_ROOT}
. $MY_SW_SCRIPTS_DIR/sw_env.sh
NUM_JAVA_PROCS=\`ps -aef | grep \$USER | grep java | grep -v grep | wc -l\`
NUM_MYSQLD_PROCS=\`ps -aef | grep \$USER | grep mysqld | grep -v grep | wc -l\`
if [ "\$1" == "-v" ]; then
ps -aef | grep \$USER | grep java | grep -v grep
ps -aef | grep \$USER | grep mysqld | grep -v grep
fi
echo "\$NUM_JAVA_PROCS java servers and \$NUM_MYSQLD_PROCS mysqld processes are running"
jps | grep -v Jps
EOF
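# Usage note: swstatus prints process counts only, swstatus -v also lists the
# individual java and mysqld processes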
#######################################################
# scripts to start command line interpreters and tools
#######################################################
cat <<EOF >$MY_SW_SCRIPTS_DIR/swhadoop
#!/bin/sh
# command to run hadoop
. $MY_SW_SCRIPTS_DIR/sw_env.sh
${MY_SW_ROOT}/hadoop/bin/hadoop \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swyarn
#!/bin/sh
# command to run yarn
. $MY_SW_SCRIPTS_DIR/sw_env.sh
${MY_SW_ROOT}/hadoop/bin/yarn \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swhdfs
#!/bin/sh
# command to run hdfs
. $MY_SW_SCRIPTS_DIR/sw_env.sh
${MY_SW_ROOT}/hadoop/bin/hdfs \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swmysql
#!/bin/sh
# command to run mysql
. $MY_SW_SCRIPTS_DIR/sw_env.sh
${MY_SW_ROOT}/mysql/bin/mysql --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} --password=${MY_SQL_USER_PASSWD} --database=${MY_SQL_METASTORE_DB} \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swmysqladmin
#!/bin/sh
# command to run mysqladmin as root user
. $MY_SW_SCRIPTS_DIR/sw_env.sh
${MY_SW_ROOT}/mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swhive
#!/bin/sh
# command to run hive command line interpreter
# Pick up MySQL JDBC driver
. $MY_SW_SCRIPTS_DIR/sw_env.sh
export HADOOP_HOME=${MY_SW_ROOT}/hadoop
${MY_SW_ROOT}/hive/bin/hive \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swhbase
#!/bin/sh
# command to run hbase shell
. $MY_SW_SCRIPTS_DIR/sw_env.sh
${MY_SW_ROOT}/hbase/bin/hbase shell \$*
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swuninstall_local_hadoop
#!/bin/sh
# uninstall local Hadoop instance.
. $MY_SW_SCRIPTS_DIR/swstopall
echo "Removing directory $MY_SW_ROOT"
echo "and all of its content."
echo "All Hadoop, HDFS, Hive, HBase content on this local instance will be lost."
echo -n "Is this ok? (y, (n)) "
read YN
if [ "\$YN" = "y" -o "\$YN" = "Y" ]; then
rm -rf $MY_SW_ROOT
echo "Removed $MY_SW_ROOT"
else
echo "Exiting without removing anything..."
fi
EOF
cat <<EOF >$MY_SW_SCRIPTS_DIR/swurls.html
<HTML>
<HEAD>
<title>URLs for local Hadoop Instance</title>
</HEAD>
<BODY>
<pre>
<a href="http://${MY_HOST_1}:${MY_HADOOP_NN_HTTP_PORT_NUM}">HDFS Admin</a>
<a href="http://${MY_HOST_1}:${MY_YARN_HTTP_PORT_NUM}">Yarn</a>
<a href="http://${MY_HOST_1}:${MY_HBASE_MASTER_INFO_PORT_NUM}">HBase Master</a>
<a href="http://${MY_HOST_1}:${MY_DCS_MASTER_INFO_PORT}">DCS Master</a>
</pre>
</BODY>
</HTML>
EOF
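# Usage note: open swurls.html (generated above in $MY_SW_SCRIPTS_DIR) in a browser
# for quick links to the HDFS, YARN, HBase and DCS web UIs of this instance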
mkdir -p $MY_SQROOT/etc
SYSTEM_DEFAULTS_TEXT_FILE=$MY_SQROOT/etc/SQSystemDefaults.conf
# alternative method for open source build, system defaults in a text file
cat <<EOF >${SYSTEM_DEFAULTS_TEXT_FILE}.temp
#+-+-+ install_local_hadoop inserted this - please do not edit this section
# No default settings added by install_local_hadoop at this time
#-+-+- install_local_hadoop end of system-inserted values, please add your own custom values below
EOF
# preserve any other text in the system defaults text file, remove old generated text
if [ -r ${SYSTEM_DEFAULTS_TEXT_FILE} ]; then
sed '/#+-+-+ install_local_hadoop /,/#-+-+- install_local_hadoop /d' <${SYSTEM_DEFAULTS_TEXT_FILE} >>${SYSTEM_DEFAULTS_TEXT_FILE}.temp
fi
# now overwrite the system defaults text file
cat ${SYSTEM_DEFAULTS_TEXT_FILE}.temp >${SYSTEM_DEFAULTS_TEXT_FILE}
# make all convenience shell scripts executable
chmod +x $MY_SW_SCRIPTS_DIR/sw*
echo
echo "Checking permissions on home directory..."
ls -ld ~
ls -ld ~ | egrep 'drwx.-..-.' >/dev/null
if [ $? -ne 0 ]; then
echo '**** ERROR:'
cat <<EOF
Check permissions on your home directory. Authentication with
public/private keys won't work if your home directory is writable
by group or others. You can fix this by doing something like
the following:
chmod 755 ~
exiting, please correct and retry this script...
EOF
exit 1
fi
# check password-less login via ssh
check_ssh
fi
# end of general setup of sw directory
cd $MY_SW_ROOT
echo
if [ -d hadoop/bin ]; then
echo "Hadoop files already exist, skipping Hadoop setup"
else
echo "Setting up Hadoop..."
#####################################################
if [ -f $MY_LOCAL_SW_DIST/${HADOOP_TAR} ]; then
cp $MY_LOCAL_SW_DIST/${HADOOP_TAR} .
else
curl -O ${HADOOP_MIRROR_URL}/${HADOOP_TAR}
fi
echo "Unpacking Hadoop tar file..."
tar -xf ${HADOOP_TAR}
rm -rf hadoop
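# note: `dirname hadoop-*/lib` resolves to the freshly unpacked hadoop-<version>
# directory (this assumes exactly one hadoop-* directory exists here)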
ln -s `dirname hadoop-*/lib` hadoop
H_CORE_FILE=hadoop/etc/hadoop/core-site.xml
H_HDFS_FILE=hadoop/etc/hadoop/hdfs-site.xml
H_MAPRED_FILE=hadoop/etc/hadoop/mapred-site.xml
H_Y_SITE_FILE=hadoop/etc/hadoop/yarn-site.xml
H_ENV_FILE=hadoop/etc/hadoop/hadoop-env.sh
H_Y_ENV_FILE=hadoop/etc/hadoop/yarn-env.sh
echo "Updating files $H_CORE_FILE, $H_HDFS_FILE and $H_MAPRED_FILE ..."
cat <<EOF >$H_ENV_FILE
export HADOOP_COMMON_LIB_NATIVE_DIR=${MY_SW_ROOT}/hadoop/lib/native
export HADOOP_OPTS="-Djava.library.path=${MY_SW_ROOT}/hadoop/lib"
EOF
mv -f $H_CORE_FILE $H_CORE_FILE.orig
cat <<EOF >$H_CORE_FILE
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>${MY_HADOOP_DATA_DIR}/tmp</value>
</property>
</configuration>
EOF
mv -f $H_HDFS_FILE $H_HDFS_FILE.orig
cat <<EOF >$H_HDFS_FILE
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>${MY_HADOOP_DATA_DIR}/dfs/data</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file://${MY_HADOOP_DATA_DIR}/dfs/name</value>
</property>
<property>
<name>dfs.http.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_NN_HTTP_PORT_NUM}</value>
</property>
<property>
<name>dfs.secondary.http.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_SECONDARY_NN_PORT_NUM}</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_DN_PORT_NUM}</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_DN_HTTP_PORT_NUM}</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_DN_IPC_PORT_NUM}</value>
</property>
<property>
<name>dfs.namenode.acls.enabled</name>
<value>true</value>
</property>
</configuration>
EOF
if [ -r $H_MAPRED_FILE ]; then
mv -f $H_MAPRED_FILE $H_MAPRED_FILE.orig
fi
cat <<EOF >$H_MAPRED_FILE
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobtracker.http.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_JOB_TRACKER_HTTP_PORT_NUM}</value>
</property>
<property>
<name>mapreduce.tasktracker.http.address</name>
<value>${MY_HOST_1}:${MY_HADOOP_TASK_TRACKER_PORT_NUM}</value>
</property>
<property>
<name>mapreduce.shuffle.port</name>
<value>${MY_HADOOP_SHUFFLE_PORT_NUM}</value>
</property>
</configuration>
EOF
mv -f $H_Y_SITE_FILE $H_Y_SITE_FILE.orig
cat <<EOF >$H_Y_SITE_FILE
<?xml version="1.0"?>
<configuration>
<property>
<name>yarn.resourcemanager.address</name>
<value>${MY_HOST_1}:${MY_YARN_RESMAN_PORT_NUM}</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>${MY_HOST_1}:${MY_YARN_SCHED_PORT_NUM}</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>${MY_HOST_1}:${MY_YARN_HTTP_PORT_NUM}</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>${MY_HOST_1}:${MY_YARN_TRACKER_PORT_NUM}</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>${MY_HOST_1}:${MY_YARN_ADMIN_PORT_NUM}</value>
</property>
<property>
<name>yarn.nodemanager.localizer.address</name>
<value>${MY_HOST_1}:${MY_YARN_LOCALIZER_PORT_NUM}</value>
</property>
<property>
<name>yarn.nodemanager.webapp.address</name>
<value>${MY_HOST_1}:${MY_YARN_NM_PORT_NUM}</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
<description>shuffle service that needs to be set for Map Reduce to run </description>
</property>
</configuration>
EOF
echo "Appending local configuration to $H_ENV_FILE..."
cat <<EOF >>$H_ENV_FILE
# Trafodion-local configuration
# Make sure JAVA_HOME is set
export JAVA_HOME=${JAVA_HOME}
# Use a local PID dir to avoid conflicts with other Hadoop instances
export HADOOP_PID_DIR=${MY_SW_ROOT}/log
EOF
echo "Appending local configuration to $H_Y_ENV_FILE..."
cat <<EOF >>$H_Y_ENV_FILE
# Trafodion-local configuration
# Make sure JAVA_HOME is set
export JAVA_HOME=${JAVA_HOME}
export HADOOP_CONF_DIR=${YARN_HOME}/etc/hadoop
export HADOOP_COMMON_HOME=${YARN_HOME}
export HADOOP_HDFS_HOME=${YARN_HOME}
# Use a local PID dir to avoid conflicts with other Hadoop instances
export HADOOP_PID_DIR=${MY_SW_ROOT}/log
EOF
#####################################################
echo "Initializing and starting Hadoop..." | tee ${MY_LOG_FILE}
cd $MY_SW_ROOT/hadoop
. $MY_SW_SCRIPTS_DIR/sw_env.sh
bin/hdfs namenode -format >>${MY_LOG_FILE} 2>&1
$MY_SW_SCRIPTS_DIR/swstophadoop >>${MY_LOG_FILE} 2>&1
$MY_SW_SCRIPTS_DIR/swstarthadoop >>${MY_LOG_FILE} 2>&1
echo "Creating HDFS directories" 2>&1 | tee -a ${MY_LOG_FILE}
bin/hdfs dfs -mkdir /tmp >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /user >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /user/$USER >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /user/hive >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /user/trafodion >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /bulkload >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /user/trafodion/bulkload >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /user/hive/warehouse >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -mkdir /hive >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -chmod g+w /tmp >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -chmod g+w /user/hive/warehouse >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -chmod g+w /bulkload >>${MY_LOG_FILE} 2>&1
bin/hdfs dfs -chmod g+w /user/trafodion/bulkload >>${MY_LOG_FILE} 2>&1
bin/hadoop fs -ls -R / 2>&1 | tee -a ${MY_LOG_FILE}
echo "Done: Creating HDFS directories" 2>&1 | tee -a ${MY_LOG_FILE}
fi
# end of Hadoop (MapReduce + HDFS) setup
cd $MY_SW_ROOT
if [ -d mysql/bin ]; then
echo "MySQL files already exist, skipping MySQL setup"
else
#####################################################
echo "Downloading MySQL..."
if [ -f $MY_LOCAL_SW_DIST/${MYSQL_TAR} ]; then
cp $MY_LOCAL_SW_DIST/${MYSQL_TAR} .
else
curl ${MYSQL_MIRROR_URL}/${MYSQL_TAR} -o ${MYSQL_TAR}
fi
echo "Unpacking MySQL tar file ${MYSQL_TAR} ..."
tar -xf ${MYSQL_TAR}
echo "Creating symbolic link to latest MySQL distribution..."
rm -rf mysql
ln -s `dirname mysql-*-linux-*/bin` mysql
# set up MySQL configuration file
if [ -f ${MY_SQL_CONFIG_FILE} ]; then
echo "Using existing MySQL config file ${MY_SQL_CONFIG_FILE}"
else
echo "Setting up MySQL configuration file ${MY_SQL_CONFIG_FILE} ..."
mkdir ${MY_SQL_DATA_DIR}
cat <<EOF >${MY_SQL_CONFIG_FILE}
[client-server]
# Use a nonstandard port and socket so this instance does not conflict with other MySQL installations
port=${MY_SQL_PORT_NUM}
socket=/tmp/mysql_${MY_SQL_PORT_NUM}.sock
# This will be passed to all MySQL clients
[client]
port=${MY_SQL_PORT_NUM}
socket=/tmp/mysql_${MY_SQL_PORT_NUM}.sock
# The MySQL server
[mysqld]
# port to use (default is 3306)
port=${MY_SQL_PORT_NUM}
socket=/tmp/mysql_${MY_SQL_PORT_NUM}.sock
# Directory where you want to put your data
datadir=${MY_SQL_DATA_DIR}
# File that contains the pid of the running mysqld
pid_file=${MY_SW_ROOT}/log/mysqld.pid
# Directory for error messages
lc-messages-dir=${MY_SW_ROOT}/mysql/share
# Create a file where the InnoDB/XtraDB engine stores its data
# innodb_data_file_path = ibdata1:20M;ibdata2:40M:autoextend
# innodb_file_per_table
# Enable logging by default to help find problems
general-log=1
general-log-file=${MY_SW_ROOT}/log/mysql-general.${HOSTNAME}.log
EOF
fi
cd mysql
echo "Running MySQL installation script..." | tee -a ${MY_LOG_FILE}
./scripts/mysql_install_db --defaults-file=${MY_SQL_CONFIG_FILE} >>${MY_LOG_FILE} 2>&1
if [ $? -ne 0 ]; then
echo "Problem installing MySQL, see, file ${MY_LOG_FILE}" | tee -a ${MY_LOG_FILE}
fi
# start mysql
echo "Starting mysqld..." | tee -a ${MY_LOG_FILE}
$MY_SW_SCRIPTS_DIR/swstartmysql >>${MY_LOG_FILE} 2>&1
if [ $? -ne 0 ]; then
echo "Problem starting MySQL, see, file ${MY_LOG_FILE}" | tee -a ${MY_LOG_FILE}
fi
fi
# end of MySQL setup
cd $MY_SW_ROOT
if [ -f mysql-connector-java-*/mysql-connector-java-*.jar ]; then
echo "MySQL JDBC driver file already exists, skipping JDBC setup"
else
#####################################################
echo "Downloading MySQL JDBC driver..."
if [ -f $MY_LOCAL_SW_DIST/${MYSQL_JDBC_TAR} ]; then
cp $MY_LOCAL_SW_DIST/${MYSQL_JDBC_TAR} .
else
curl -O ${MYSQL_JDBC_URL}/${MYSQL_JDBC_TAR}
fi
echo "Unpacking MySQL JDBC tar file ${MYSQL_JDBC_TAR} ..."
tar -xf ${MYSQL_JDBC_TAR}
ln -s `dirname mysql-connector-java-*/mysql-connector-java-*.jar` mysql-connector-java
fi
# end of MySQL JDBC setup
cd $MY_SW_ROOT
if [ -d hive/bin ]; then
echo "Hive files already exist, skipping Hive setup"
else
#####################################################
echo "Downloading Hive..."
if [ -f $MY_LOCAL_SW_DIST/${HIVE_TAR} ]; then
cp $MY_LOCAL_SW_DIST/${HIVE_TAR} .
else
curl -O ${HIVE_MIRROR_URL}/${HIVE_TAR}
fi
echo "Unpacking Hive tar file ${HIVE_TAR} ..."
tar -xf ${HIVE_TAR}
echo "Creating symbolic link to latest Hive distribution..."
rm -rf hive
# ln -s `dirname hive-*/bin` hive
ln -s ${HIVE_PREFIX} hive
HIVE_CONFIG_FILE=hive/conf/hive-site.xml
HIVE_LOG_CONFIG_FILE=hive/conf/hive-log4j.properties
echo "Updating file $HIVE_CONFIG_FILE ..."
if [ -r $HIVE_CONFIG_FILE ]; then
mv -f $HIVE_CONFIG_FILE $HIVE_CONFIG_FILE.orig
fi
cat <<EOF >$HIVE_CONFIG_FILE
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>hive.exec.scratchdir</name>
<value>${MY_HIVE_DATA_DIR}</value>
</property>
<property>
<name>hive.metastore.local</name>
<value>true</value>
</property>
<!-- Use MySQL as metastore -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://${MY_HOST_1}:${MY_SQL_PORT_NUM}/${MY_SQL_METASTORE_DB}?createDatabaseIfNotExist=true</value>
<description>JDBC connect string for a JDBC metastore (don't include white space)</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>${MY_SQL_USER}</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>${MY_SQL_USER_PASSWD}</value>
</property>
<!-- end of MySQL metastore parameters -->
<!-- Alternatively, use this to set up Apache Derby as metadata store
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:derby:;databaseName=${MY_DERBY_DATA_DIR}/metastore_db;create=true</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
end of alternative Derby configuration -->
<!-- other useful Hive configuration settings -->
<property>
<name>hive.cli.print.header</name>
<value>true</value>
<description>Whether to print the names of the columns in query output.</description>
</property>
</configuration>
EOF
cat <<EOF >${HIVE_LOG_CONFIG_FILE}
hive.root.logger=WARN,DRFA
hive.log.dir=${MY_SW_ROOT}/log/hive
hive.log.file=hive.log
EOF
echo "Copying MySQL JDBC driver to ${MY_SW_ROOT}/hive/lib"
cp -p ${MY_SW_ROOT}/mysql-connector-java/*.jar ${MY_SW_ROOT}/hive/lib
echo "Sleeping 10 sec to wait for MySQL to start..."
sleep 10
mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} password ${MY_SQL_ADMIN_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} -h ${MY_HOST_1} password ${MY_SQL_ADMIN_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
if [ ${MY_SQL_ADMIN} != ${MY_SQL_USER} ]; then
echo "Creating MySQL user ${MY_SQL_USER}"
${MY_SW_ROOT}/mysql/bin/mysql --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_ADMIN} --password=${MY_SQL_ADMIN_PASSWD} <<EOF 2>&1 | tee -a ${MY_LOG_FILE}
CREATE USER '${MY_SQL_USER}'@'%' IDENTIFIED BY '${MY_SQL_USER_PASSWD}';
EOF
mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} password ${MY_SQL_USER_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
mysql/bin/mysqladmin --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} --password=${MY_SQL_USER_PASSWD} -h ${MY_HOST_1} password ${MY_SQL_USER_PASSWD} 2>&1 | tee -a ${MY_LOG_FILE}
fi
echo "Creating Hive database and metastore in MySQL..."
${MY_SW_ROOT}/mysql/bin/mysql --defaults-file=${MY_SQL_CONFIG_FILE} --user=${MY_SQL_USER} --password=${MY_SQL_USER_PASSWD} <<EOF 2>&1 | tee -a ${MY_LOG_FILE}
create database ${MY_SQL_METASTORE_DB};
use ${MY_SQL_METASTORE_DB};
SOURCE ${MY_SW_ROOT}/hive/scripts/metastore/upgrade/mysql/hive-schema-0.13.0.mysql.sql;
EOF
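# Optional sanity check (not run automatically): $MY_SW_SCRIPTS_DIR/swmysql -e 'show tables;'
# should list the metastore tables created by the schema script above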
fi
# end of Hive setup
cd $MY_SW_ROOT
if [ -d hbase/bin ]; then
echo "HBase files already exist, skipping HBase setup"
else
#####################################################
echo "Downloading HBase..."
if [ -f $MY_LOCAL_SW_DIST/${HBASE_TAR} ]; then
cp $MY_LOCAL_SW_DIST/${HBASE_TAR} .
else
curl -O ${HBASE_MIRROR_URL}/${HBASE_TAR}
fi
echo "Unpacking HBase tar file ${HBASE_TAR} ..."
tar -xf ${HBASE_TAR}
echo "Creating symbolic link to latest HBase distribution..."
rm -rf hbase
ln -s `dirname hbase-*/bin` hbase
HBASE_CONFIG_FILE=hbase/conf/hbase-site.xml
HBASE_ENV_FILE=hbase/conf/hbase-env.sh
# For HBase_Trx
# Do not build if the TRX jar already exists
if [ ! -e ${MY_SQROOT}/export/lib/${HBASE_TRX_JAR} ]; then
# Build HBase TRX
echo "Building HBase TRX"
cd $MY_SQROOT ; make genverhdr 2>&1 | tee -a ${MY_LOG_FILE}
cd $MY_SQROOT/src/seatrans/hbase-trx
make clean 2>&1 | tee -a ${MY_LOG_FILE}
make 2>&1 | tee -a ${MY_LOG_FILE}
fi
cd $MY_SW_ROOT
# Add the HBase TRX JAR to the HBase CLASSPATH
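# HBASE_TRX_JAR is assumed to be set by the Trafodion build environment
# (e.g. sourced via sqenv.sh); it is not set by this script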
echo "export HBASE_CLASSPATH=${MY_SQROOT}/export/lib/\${HBASE_TRX_JAR}" >> ${HBASE_ENV_FILE}
echo "Updating file $HBASE_CONFIG_FILE ..."
if [ -r $HBASE_CONFIG_FILE ]; then
mv -f $HBASE_CONFIG_FILE $HBASE_CONFIG_FILE.orig
fi
cat <<EOF >$HBASE_CONFIG_FILE
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}/hbase</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}/zookeeper</value>
</property>
<property>
<name>hbase.master.port</name>
<value>${MY_HBASE_MASTER_PORT_NUM}</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>${MY_HBASE_MASTER_INFO_PORT_NUM}</value>
</property>
<property>
<name>hbase.regionserver.port</name>
<value>${MY_HBASE_REGIONSERVER_PORT_NUM}</value>
</property>
<property>
<name>hbase.regionserver.info.port</name>
<value>${MY_HBASE_REGIONSERVER_INFO_PORT_NUM}</value>
</property>
<property>
<name>hbase.zookeeper.peerport</name>
<value>${MY_HBASE_ZOOKEEPER_PEERPORT_NUM}</value>
</property>
<property>
<name>hbase.zookeeper.leaderport</name>
<value>${MY_HBASE_ZOOKEEPER_LEADERPORT_NUM}</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>${MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM}</value>
</property>
<property>
<name>hbase.rest.port</name>
<value>${MY_HBASE_REST_PORT_NUM}</value>
</property>
<property>
<name>hbase.client.scanner.caching</name>
<value>100</value>
</property>
<property>
<name>hbase.client.scanner.timeout.period</name>
<value>60000</value>
</property>
<property>
<name>hbase.bulkload.staging.dir</name>
<value>hdfs://${MY_HOST_1}:${MY_HADOOP_HDFS_PORT_NUM}/hbase-staging</value>
</property>
<property>
<name>hbase.snapshot.enabled</name>
<value>true</value>
</property>
<property>
<name>hbase.master.distributed.log.splitting</name>
<value>false</value>
</property>
<property>
<name>hbase.hregion.impl</name>
<value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>
</property>
<property>
<name>hbase.coprocessor.region.classes</name>
<value>
org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,
org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,
org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint,
org.apache.hadoop.hbase.coprocessor.AggregateImplementation
</value>
</property>
</configuration>
EOF
echo "Starting HBase..." | tee -a ${MY_LOG_FILE}
$MY_SW_SCRIPTS_DIR/swstarthbase >>${MY_LOG_FILE} 2>&1
if [ $? -ne 0 ]; then
echo "Problem starting HBase, see, file ${MY_LOG_FILE}" | tee -a ${MY_LOG_FILE}
fi
fi
# end of HBase setup
cd $MY_SW_ROOT
if [ -d tpcds/tools ]; then
echo "TPC-DS files already exist, skipping TPC-DS setup"
else
install_hadoop_regr_test_env \
--unpackDir=$MY_SW_ROOT/tpcds \
--dataDir=${MY_DATA_DIR}/tpcds \
--logFile=$MY_LOG_FILE \
--hdfsCmd=${MY_SW_SCRIPTS_DIR}/swhdfs \
--hiveCmd=${MY_SW_SCRIPTS_DIR}/swhive
if [ $? -ne 0 ]; then
echo "Error installing TPC-DS and ORC files, exiting..."
exit 1
fi
fi
# end of TPC-DS setup
cd $MY_SW_ROOT
echo "Setting up DCS, REST and Phoenix tests..."
# Default git repository location
GIT_DIR="git@github.com:apache/incubator-trafodion"
DCS_SRC=$MY_SQROOT/../../dcs
if [ -d $DCS_SRC ]; then
TRAF_SRC=$MY_SQROOT/../../
# Default location of DCS code
DCS_SRC=$MY_SQROOT/../../dcs
# Default location of REST code
REST_SRC=$MY_SQROOT/../rest
# Default location for phoenix_test
PHX_SRC=$MY_SQROOT/../../tests/phx
else
TRAF_SRC=$MY_SW_ROOT/src/incubator-trafodion
if [ ! -d $TRAF_SRC ]; then
mkdir -p $MY_SW_ROOT/src
cd $MY_SW_ROOT/src
git clone $GIT_DIR
fi
# Default location of DCS code
DCS_SRC=$TRAF_SRC/dcs
# Default location of REST code
REST_SRC=$TRAF_SRC/core/rest
# Default location for phoenix_test
PHX_SRC=$TRAF_SRC/tests/phx
fi
echo "Default Trafodion Source directory..."
echo "For Core... $TRAF_SRC"
echo "For DCS... $DCS_SRC"
echo "For REST... $REST_SRC"
echo "For PHX... $PHX_SRC"
if [ -d dcs-* ]; then
echo "DCS files already exist, skipping DCS setup"
else
#####################################################
# options depend on the user environment variables described in the script header
if [[ -f $DCS_TAR ]]
then
echo "Using DCS Tar: $DCS_TAR" | tee -a ${MY_LOG_FILE}
elif [[ -n $DCS_URL ]]
then
echo "Downloading DCS Tar: $DCS_URL" | tee -a ${MY_LOG_FILE}
rm -f dcs_download.tar
curl ${DCS_URL} -o dcs_download.tar
DCS_TAR=./dcs_download.tar
elif [[ -d $DCS_SRC ]]
then
if [[ -f $DCS_SRC/target/dcs*tar.gz ]]
then
echo "Using DCS tar file in source tree: $DCS_SRC" | tee -a ${MY_LOG_FILE}
else
echo "No DCS tar file found, building DCS: $DCS_SRC" | tee -a ${MY_LOG_FILE}
echo "Building DCS Source in $DCS_SRC" | tee -a ${MY_LOG_FILE}
cd $DCS_SRC
${MAVEN:-mvn} site package >>${MY_LOG_FILE} 2>&1
cd $MY_SW_ROOT
fi
DCS_TAR=$(ls $DCS_SRC/target/dcs*tar.gz)
fi
if [[ ! -f $DCS_TAR ]]
then
echo '**** ERROR:' | tee -a ${MY_LOG_FILE}
echo "DCS tar file not found: $DCS_TAR" | tee -a ${MY_LOG_FILE}
exit 2
fi
# install
echo "Installing DCS from: $DCS_TAR" | tee -a ${MY_LOG_FILE}
tar xzf $DCS_TAR
DCS_HOME=$(ls -d $MY_SW_ROOT/dcs-*)
# configure DCS
# use ~/.trafodion to avoid modifying sqenv*.sh in source tree
echo "Adding DCS_INSTALL_DIR=$DCS_INSTALL_DIR to sqenv via ~/.trafodion" | tee -a ${MY_LOG_FILE}
echo " Update it if switching between multiple local_hadoop environments" | tee -a ${MY_LOG_FILE}
if [[ -f ~/.trafodion ]]
then
mv -f ~/.trafodion ~/.trafodion.orig
grep -v 'DCS_INSTALL_DIR=' ~/.trafodion.orig > ~/.trafodion
fi
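# strip the path prefix, leaving just the dcs-<version> directory name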
DCSDIR=${DCS_HOME##*/}
echo "export DCS_INSTALL_DIR=\${MY_SQROOT}/sql/local_hadoop/$DCSDIR" >> ~/.trafodion
cd $DCS_HOME/conf/
mv dcs-env.sh dcs-env.sh.orig
# SQROOT env var does not propagate thru ssh command
echo "MY_SQROOT=$MY_SQROOT" > dcs-env.sh
sed -e "s@#[ ]*export DCS_MANAGES_ZK=true@export DCS_MANAGES_ZK=false@" dcs-env.sh.orig >> dcs-env.sh
mv -f dcs-site.xml dcs-site.xml.orig
sed -e "s@</configuration>@@" dcs-site.xml.orig > dcs-site.xml
cat >>dcs-site.xml <<EOF
<property>
<name>dcs.master.port</name> <value>$MY_DCS_MASTER_PORT</value>
</property>
<property>
<name>dcs.master.info.port</name> <value>$MY_DCS_MASTER_INFO_PORT</value>
</property>
<property>
<name>dcs.server.info.port</name> <value>$MY_DCS_SERVER_INFO_PORT</value>
</property>
<property>
<name>dcs.zookeeper.peerport</name> <value>$MY_HBASE_ZOOKEEPER_PEERPORT_NUM</value>
</property>
<property>
<name>dcs.zookeeper.leaderport</name> <value>$MY_HBASE_ZOOKEEPER_LEADERPORT_NUM</value>
</property>
<property>
<name>dcs.zookeeper.property.clientPort</name> <value>$MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM</value>
</property>
</configuration>
EOF
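# conf/servers lists "<host> <server count>" entries; this configures 4 DCS
# servers on localhost, matching the MY_DCS_MASTER_PORT range comment above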
echo "localhost 4" > servers
# Configure DCS test scripts
if [[ -n "$DCS_SRC" ]]
then
echo "Adding swjdbc script...." | tee -a ${MY_LOG_FILE}
cat <<EOF >$MY_SW_SCRIPTS_DIR/swjdbc
#!/bin/sh
# command to run JDBC tests
cd $DCS_SRC/src/test/jdbc_test
./jdbc_test.py --appid=jdbc_test --user=SOMEUSER --pw=SOMEPASSWORD --javahome=\$JAVA_HOME \\
--target=localhost:$MY_DCS_MASTER_PORT \\
--jdbctype=T4 --jdbccp=\$MY_SQROOT/export/lib/jdbcT4.jar "\$@"
EOF
chmod +x $MY_SW_SCRIPTS_DIR/swjdbc
fi
fi
# end of DCS setup
# begin of trafci setup
echo "Updating trafci port number" | tee -a ${MY_LOG_FILE}
TRAFCI_BIN_DIR=$MY_SQROOT/trafci/bin
if [[ -f $TRAFCI_BIN_DIR/trafci ]]
then
mv $TRAFCI_BIN_DIR/trafci $TRAFCI_BIN_DIR/trafci.orig
sed -e "s@localhost:23400@localhost:$MY_DCS_MASTER_PORT@" $TRAFCI_BIN_DIR/trafci.orig > $TRAFCI_BIN_DIR/trafci
chmod +x $TRAFCI_BIN_DIR/trafci
echo "Modified trafci port number to $MY_DCS_MASTER_PORT" | tee -a ${MY_LOG_FILE}
else
echo "$TRAFCI_BIN_DIR not found" | tee -a ${MY_LOG_FILE}
fi
# end of trafci set up
if [[ -d $PHX_SRC ]]; then
echo "Phoenix files already exist, skipping Phoenix setup" | tee -a ${MY_LOG_FILE}
#######################################################
# scripts to run tests
#######################################################
# this is added here so that it also gets added to
# existing local_hadoop installations
echo "Adding swphoenix script...." | tee -a ${MY_LOG_FILE}
cat <<EOF >$MY_SW_SCRIPTS_DIR/swphoenix
#!/bin/sh
# command to run phoenix tests
cd $PHX_SRC
if [[ \$1 == "t4" ]]
then
./phoenix_test.py --target=localhost:$MY_DCS_MASTER_PORT --user=dontcare --pw=dontcare \\
--targettype=TR --javahome=\$JAVA_HOME --jdbccp=\$MY_SQROOT/export/lib/jdbcT4.jar
elif [[ \$1 == "t2" ]]
then
export LD_PRELOAD=\$JAVA_HOME/jre/lib/amd64/libjsig.so:\$MY_SQROOT/export/lib\$SQ_MBTYPE/libseabasesig.so
./phoenix_test.py --targettype=TR --javahome=\$JAVA_HOME \\
--jdbccp=\$MY_SQROOT/export/lib/jdbcT2.jar --jdbctype=T2
else
echo "Usage: swphoenix (t2|t4)"
exit 1
fi
EOF
chmod +x $MY_SW_SCRIPTS_DIR/swphoenix
fi
# end of Phoenix setup
# Begin of Trafodion REST Server setup
if [ -d rest-* ]; then
echo "Trafodion REST files already exist, skipping REST setup"
else
#####################################################
# options depend on the user environment variables described in the script header
if [[ -f $REST_TAR ]]
then
echo "Using REST Tar: $REST_TAR" | tee -a ${MY_LOG_FILE}
elif [[ -n $REST_URL ]]
then
echo "Downloading REST Tar: $REST_URL" | tee -a ${MY_LOG_FILE}
rm -f rest_download.tar
curl ${REST_URL} -o rest_download.tar
REST_TAR=./rest_download.tar
elif [[ -d $REST_SRC ]]
then
if [[ -f $REST_SRC/target/rest*tar.gz ]]
then
echo "Using REST tar file in source tree: $REST_SRC" | tee -a ${MY_LOG_FILE}
else
echo "No REST tar file found, building REST: $REST_SRC" | tee -a ${MY_LOG_FILE}
echo "Building REST Source in $REST_SRC" | tee -a ${MY_LOG_FILE}
cd $REST_SRC
${MAVEN:-mvn} site package >>${MY_LOG_FILE} 2>&1
cd $MY_SW_ROOT
fi
REST_TAR=$(ls $REST_SRC/target/rest*tar.gz)
fi
if [[ ! -f $REST_TAR ]]
then
echo '**** ERROR:' | tee -a ${MY_LOG_FILE}
echo "REST tar file not found: $REST_TAR" | tee -a ${MY_LOG_FILE}
exit 2
fi
# install
echo "Installing REST from: $REST_TAR" | tee -a ${MY_LOG_FILE}
tar xzf $REST_TAR
REST_HOME=$(ls -d $MY_SW_ROOT/rest-*)
# configure REST
# use ~/.trafodion to avoid modifying sqenv*.sh in source tree
echo "Adding REST_INSTALL_DIR=$REST_INSTALL_DIR to sqenv via ~/.trafodion" | tee -a ${MY_LOG_FILE}
echo " Update it if switching between multiple local_hadoop environments" | tee -a ${MY_LOG_FILE}
if [[ -f ~/.trafodion ]]
then
mv -f ~/.trafodion ~/.trafodion.orig
grep -v 'REST_INSTALL_DIR=' ~/.trafodion.orig >> ~/.trafodion
fi
RESTDIR=${REST_HOME##*/}
echo "export REST_INSTALL_DIR=\${MY_SQROOT}/sql/local_hadoop/$RESTDIR" >> ~/.trafodion
cd $REST_HOME/conf/
mv rest-env.sh rest-env.sh.orig
# SQROOT env var does not propagate thru ssh command
echo "MY_SQROOT=$MY_SQROOT" > rest-env.sh
sed -e "s@#[ ]*export REST_MANAGES_ZK=true@export REST_MANAGES_ZK=false@" rest-env.sh.orig >> rest-env.sh
mv -f rest-site.xml rest-site.xml.orig
sed -e "s@</configuration>@@" rest-site.xml.orig > rest-site.xml
cat >>rest-site.xml <<EOF
<property>
<name>rest.port</name> <value>$MY_REST_SERVER_PORT</value>
</property>
<property>
<name>rest.https.port</name> <value>$MY_REST_SERVER_SECURE_PORT</value>
</property>
<property>
<name>rest.zookeeper.peerport</name> <value>$MY_HBASE_ZOOKEEPER_PEERPORT_NUM</value>
</property>
<property>
<name>rest.zookeeper.leaderport</name> <value>$MY_HBASE_ZOOKEEPER_LEADERPORT_NUM</value>
</property>
<property>
<name>rest.zookeeper.property.clientPort</name> <value>$MY_HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT_NUM</value>
</property>
</configuration>
EOF
fi
# End of Trafodion REST Server setup
if [ ! -r $YARN_HOME/lib/native/libhdfs.so ]; then
echo "------------------------------------------------------------"
echo "-- WARNING: libhdfs.so is not present on this system. Please"
echo "-- build it, otherwise Trafodion will not compile."
echo "------------------------------------------------------------"
fi
echo
echo "Installed directory size and name = $(du -sh $MY_SW_ROOT)" | tee -a ${MY_LOG_FILE}
echo
echo "Setup is complete. You can use the convenience scripts starting with sw... located in $MY_SW_SCRIPTS_DIR."