#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Base Vars
#
SCRIPT_NAME=$(basename "$0")
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
#
# Functions
#
usage() {
echo "Usage: $SCRIPT_NAME -f </path/to/config> -p <product (hbase or storm)> -w <worker count> -n <app name>"
exit 1
}
#
# Parse cmd line args
#
while getopts "f:p:w:n:" opt; do
case "$opt" in
f) config=$OPTARG;;
p) product=$OPTARG;;
w) worker_cnt=$OPTARG;;
n) app_name=$OPTARG;;
*) usage;;
esac
done
shift $((OPTIND-1))
if [ -z "$config" ] || [ -z "$product" ] || [ -z "$worker_cnt" ] || [ -z "$app_name" ]; then
usage
fi
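#
# Example invocation (the paths and names below are illustrative only):
#   ./install_slider.sh -f /root/slider-install.conf -p hbase -w 3 -n hbase1
#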
# Only support known products for now
if [ "$product" != "hbase" ] && [ "$product" != "storm" ] && [ "$product" != "accumulo" ]; then
echo "ERROR: Only HBase, Storm, and Accumulo are currently supported"
usage
fi
#
# Source the config
#
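# The sourced config is expected to define every variable this script reads.
# A minimal sketch (all values are illustrative placeholders):
#
#   SLIDER_INST_DIR=/opt/slider
#   SLIDER_URL=http://host.example.com/slider-0.60.0-incubating-all.tar
#   SLIDER_VER=slider-0.60.0
#   JDK_BIN=/usr/java/default/bin
#   RM_ADDRESS=resourcemanager.example.com:8050
#   RM_SCHED_ADDRESS=resourcemanager.example.com:8030
#   DEFAULT_FS=hdfs://namenode.example.com:8020
#   YARN_CP=/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*
#   ZK_QUORUM=zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181
#   HBASE_APP_URL=http://host.example.com/slider-hbase-app-package.zip
#   STORM_APP_URL=http://host.example.com/slider-storm-app-package.zip
#   ACCUMULO_APP_URL=http://host.example.com/slider-accumulo-app-package.zip
#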
source "$config"
#
# Product URLs
#
if [ "$product" = "hbase" ]; then
app_url=$HBASE_APP_URL
elif [ "$product" = "storm" ]; then
app_url=$STORM_APP_URL
elif [ "$product" = "accumulo" ]; then
app_url=$ACCUMULO_APP_URL
fi
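# Sanity check: fail fast if the sourced config did not define a package URL
# for the chosen product
if [ -z "$app_url" ]; then
  echo "ERROR: No app package URL set in $config for product: $product"
  exit 1
fi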
#
# Main
#
echo -e "\n## Creating slider install dir: $SLIDER_INST_DIR"
mkdir -p "$SLIDER_INST_DIR" || exit 1
chown yarn:hadoop "$SLIDER_INST_DIR" || exit 1
echo "SUCCESS"
echo -e "\n## Downloading slider from: $SLIDER_URL"
rm -f /tmp/"${SLIDER_VER}"*tar
cd /tmp && wget "$SLIDER_URL" || exit 1
echo "SUCCESS"
echo -e "\n## Extracting slider to $SLIDER_INST_DIR"
tar -xf /tmp/"${SLIDER_VER}"*tar --strip-components=1 -C "$SLIDER_INST_DIR" || exit 1
chown -R yarn:hadoop "$SLIDER_INST_DIR" || exit 1
echo "SUCCESS"
echo -e "\n## Setting conf values"
(cd "$SCRIPT_DIR" && cp slider-client.xml "$SLIDER_INST_DIR/conf") || exit 1
sed -i "s|@@RM_ADDRESS@@|$RM_ADDRESS|g" "$SLIDER_INST_DIR/conf/slider-client.xml" || exit 1
sed -i "s|@@RM_SCHED_ADDRESS@@|$RM_SCHED_ADDRESS|g" "$SLIDER_INST_DIR/conf/slider-client.xml" || exit 1
sed -i "s|@@DEFAULT_FS@@|$DEFAULT_FS|g" "$SLIDER_INST_DIR/conf/slider-client.xml" || exit 1
sed -i "s|@@YARN_CP@@|$YARN_CP|g" "$SLIDER_INST_DIR/conf/slider-client.xml" || exit 1
sed -i "s|@@ZK_QUORUM@@|$ZK_QUORUM|g" "$SLIDER_INST_DIR/conf/slider-client.xml" || exit 1
echo "SUCCESS"
echo -e "\n## Setting PATH to include the JDK bin: $JDK_BIN"
export PATH="$PATH:$JDK_BIN"
echo "SUCCESS"
echo -e "\n## Checking version of Hadoop slider was compiled against"
hadoop_compiled_ver=$("$SLIDER_INST_DIR"/bin/slider version | grep "Compiled against Hadoop" | awk '{print $NF}')
if [ "$hadoop_compiled_ver" != "2.4.0" ]; then
echo "ERROR: Compiled against Hadoop version $hadoop_compiled_ver instead of 2.4.0" && exit 1
else
echo "Compiled against Hadoop version: $hadoop_compiled_ver"
fi
echo "SUCCESS"
echo -e "\n## Setting up HDFS directories for slider"
sudo -u hdfs hdfs dfs -mkdir -p /slider || exit 1
sudo -u hdfs hdfs dfs -chown yarn:hdfs /slider || exit 1
sudo -u hdfs hdfs dfs -mkdir -p /user/yarn || exit 1
sudo -u hdfs hdfs dfs -chown yarn:hdfs /user/yarn || exit 1
echo "SUCCESS"
echo -e "\n## Loading the Slider agent"
sudo -u yarn hdfs dfs -mkdir -p /slider/agent/conf || exit 1
sudo -u yarn hdfs dfs -copyFromLocal "$SLIDER_INST_DIR"/agent/* /slider/agent || exit 1
echo "SUCCESS"
echo -e "\n## Downloading $product to $SLIDER_INST_DIR/apps/$product"
sudo -u yarn mkdir -p "$SLIDER_INST_DIR/apps/$product" || exit 1
(cd "$SLIDER_INST_DIR/apps/$product" && sudo -u yarn wget "$app_url") || exit 1
echo "SUCCESS"
echo -e "\n## Extracting $product in $SLIDER_INST_DIR/apps/$product"
(cd "$SLIDER_INST_DIR/apps/$product" && sudo -u yarn unzip -o "$product"*zip) || exit 1
echo "SUCCESS"
echo -e "\n## Adding $product to HDFS slider dir"
sudo -u yarn hdfs dfs -copyFromLocal "$SLIDER_INST_DIR/apps/$product/$product"*zip /slider || exit 1
echo "SUCCESS"
echo -e "\n## Setting number of workers in $SLIDER_INST_DIR/apps/$product/resources.json"
if [ "$product" = "hbase" ]; then
component="HBASE_REGIONSERVER"
elif [ "$product" = "storm" ]; then
component="SUPERVISOR"
elif [ "$product" = "accumulo" ]; then
component="ACCUMULO_TSERVER"
fi
# Update the resources.json file with worker_cnt
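# The snippet below assumes the standard Slider resources.json layout, roughly:
#   { "components": { "<COMPONENT>": { "yarn.component.instances": "<n>", ... }, ... } }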
python << END
import json
path = "$SLIDER_INST_DIR/apps/$product/resources.json"
with open(path, "r") as f:
    data = json.load(f)
data["components"]["$component"]["yarn.component.instances"] = "$worker_cnt"
with open(path, "w") as f:
    f.write(json.dumps(data, sort_keys=False, indent=4))
END
echo "SUCCESS"
# Handle HBase HDFS dir needs
if [ "$product" = "hbase" ]; then
echo -e "\n## Creating hbase HDFS dir /apps/hbase"
sudo -u hdfs hdfs dfs -mkdir -p /apps/hbase || exit 1
sudo -u hdfs hdfs dfs -chown yarn:hdfs /apps/hbase || exit 1
echo "SUCCESS"
fi
echo -e "\n##Starting app $product with $worker_cnt workers via slider"
sudo -u yarn "$SLIDER_INST_DIR"/bin/slider create "$app_name" \
--image "$DEFAULT_FS/slider/agent/slider-agent.tar.gz" \
--template "$SLIDER_INST_DIR/apps/$product/appConfig.json" \
--resources "$SLIDER_INST_DIR/apps/$product/resources.json" || exit 1
echo "SUCCESS"