HDFS-1963. Create RPM and Debian packages for HDFS. Changes deployment
layout to be consistent across the binary tgz, rpm, and deb.
(Eric Yang via omalley)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@1128393 13f79535-47bb-0310-9956-ffa450edef68
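
For orientation, the layout this change converges on looks roughly like the following. This is an illustrative sketch assembled from the paths touched below (the /usr prefix is the rpm/deb default; the binary tgz produces the same tree relative to its unpack directory):

    /usr/bin/hdfs                     user-facing command
    /usr/libexec/hdfs-config.sh       sourced by scripts, not run directly
    /usr/sbin/                        start-*.sh, stop-*.sh, init scripts
    /usr/share/hadoop/hdfs/           jars, lib/, webapps/, templates/
    /etc/hadoop/                      configuration
    /var/log/hadoop/hdfs              logs
    /var/run/hadoop                   pid files
    /var/lib/hadoop                   state

Because the tree is the same for all three package formats, every script can locate hdfs-config.sh at ../libexec relative to its own bin or sbin directory.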
diff --git a/CHANGES.txt b/CHANGES.txt
index bd3d308..cae0d00 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -101,8 +101,8 @@
HDFS-1663. Federation: Rename getPoolId() everywhere to
getBlockPoolId() (tanping via boryas)
- HDFS-1652. FederationL Add support for multiple namenodes in MiniDFSCluster.
- (suresh)
+ HDFS-1652. Federation: Add support for multiple namenodes in
+ MiniDFSCluster. (suresh)
HDFS-1672. Federation: refactor stopDatanode(name) to work
with multiple Block Pools (boryas)
@@ -243,8 +243,8 @@
HDFS-1754. Federation: testFsck fails. (boryas)
- HDFS-1755. Federation: The BPOfferService must always connect to namenode as
- the login user. (jitendra)
+ HDFS-1755. Federation: The BPOfferService must always connect to namenode
+ as the login user. (jitendra)
HDFS-1675. Support transferring RBW between datanodes. (szetszwo)
@@ -281,6 +281,9 @@
HDFS-1914. Federation: namenode storage directories must be configurable
specific to name service. (suresh)
+ HDFS-1963. Create RPM and Debian packages for HDFS. Changes deployment
+ layout to be consistent across the binary tgz, rpm, and deb.
+ (Eric Yang via omalley)
IMPROVEMENTS
diff --git a/bin/distribute-exclude.sh b/bin/distribute-exclude.sh
index a627ba1..cc538f7 100644
--- a/bin/distribute-exclude.sh
+++ b/bin/distribute-exclude.sh
@@ -36,7 +36,7 @@
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
-. "$bin/hdfs-config.sh"
+. "$bin/../libexec/hdfs-config.sh"
if [ "$1" = '' ] ; then
"Error: please specify local exclude file as a first argument"
@@ -50,8 +50,8 @@
exit 1
fi
-namenodes=$("$HADOOP_HDFS_HOME/bin/hdfs" getconf -namenodes)
-excludeFilenameRemote=$("$HADOOP_HDFS_HOME/bin/hdfs" getconf -excludeFile)
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
+excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
if [ "$excludeFilenameRemote" = '' ] ; then
echo \
diff --git a/bin/hdfs b/bin/hdfs
index ce9dc0a..d1d6026 100755
--- a/bin/hdfs
+++ b/bin/hdfs
@@ -15,10 +15,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-bin=`dirname "$0"`
+bin=`which "$0"`
+bin=`dirname ${bin}`
bin=`cd "$bin"; pwd`
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
function print_usage(){
echo "Usage: hdfs [--config confdir] COMMAND"
@@ -107,29 +108,29 @@
fi
# for developers, add hdfs classes to CLASSPATH
-if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes
+if [ -d "$HADOOP_PREFIX/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
fi
-if [ -d "$HADOOP_HDFS_HOME/build/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build
+if [ -d "$HADOOP_PREFIX/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build
fi
-if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes
+if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes
fi
-if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools
+if [ -d "$HADOOP_PREFIX/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/tools
fi
# for releases, add core hdfs jar & webapps to CLASSPATH
-if [ -d "$HADOOP_HDFS_HOME/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME
+if [ -d "$HADOOP_PREFIX/share/hadoop/hdfs/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/share/hadoop/hdfs
fi
-for f in $HADOOP_HDFS_HOME/hadoop-hdfs-*.jar; do
+for f in $HADOOP_PREFIX/share/hadoop/hdfs/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
# add libs to CLASSPATH
-for f in $HADOOP_HDFS_HOME/lib/*.jar; do
+for f in $HADOOP_PREFIX/lib/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
@@ -146,7 +147,7 @@
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi
- exec "$HADOOP_HDFS_HOME/bin/jsvc" \
+ exec "$HADOOP_PREFIX/bin/jsvc" \
-Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
-errfile "$HADOOP_LOG_DIR/jsvc.err" \
-pidfile "$HADOOP_SECURE_DN_PID" \
diff --git a/bin/hdfs-config.sh b/bin/hdfs-config.sh
index 43a5ab5..1c4f8c6 100644
--- a/bin/hdfs-config.sh
+++ b/bin/hdfs-config.sh
@@ -18,15 +18,14 @@
# included in all the hdfs scripts with source command
# should not be executed directly
-bin=`dirname "$0"`
+bin=`which "$0"`
+bin=`dirname "${bin}"`
bin=`cd "$bin"; pwd`
-export HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$bin/..}"
+export HADOOP_PREFIX="${HADOOP_PREFIX:-$bin/..}"
-if [ -d "${HADOOP_COMMON_HOME}" ]; then
- . "$HADOOP_COMMON_HOME"/bin/hadoop-config.sh
-elif [ -d "${HADOOP_HOME}" ]; then
- . "$HADOOP_HOME"/bin/hadoop-config.sh
+if [ -d "$bin" ]; then
+ . "$bin"/../libexec/hadoop-config.sh
elif [ -e "${HADOOP_HDFS_HOME}"/bin/hadoop-config.sh ]; then
. "$HADOOP_HDFS_HOME"/bin/hadoop-config.sh
else
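
hdfs-config.sh now anchors on HADOOP_PREFIX (defaulting to the parent of the directory the script resolves to) and sources the co-located libexec/hadoop-config.sh first, falling back to $HADOOP_HDFS_HOME/bin only if that is missing. A minimal sketch of driving the new layout, assuming the /usr prefix used by the packages:

    # hedged example; HADOOP_PREFIX may be pre-set to point at any unpacked tree
    export HADOOP_PREFIX=/usr
    "$HADOOP_PREFIX"/bin/hdfs getconf -namenodes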
diff --git a/bin/refresh-namenodes.sh b/bin/refresh-namenodes.sh
index c382951..4e226f7 100644
--- a/bin/refresh-namenodes.sh
+++ b/bin/refresh-namenodes.sh
@@ -23,13 +23,13 @@
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
-. "$bin/hdfs-config.sh"
+. "$bin/../libexec/hdfs-config.sh"
-namenodes=$("$HADOOP_HDFS_HOME/bin/hdfs" getconf -namenodes)
+namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
for namenode in $namenodes ; do
echo "Refreshing namenode [$namenode]"
- "$HADOOP_HDFS_HOME/bin/hdfs" dfsadmin -refreshNodes
+ "$HADOOP_PREFIX/bin/hdfs" dfsadmin -refreshNodes
if [ "$?" != '0' ] ; then errorFlag='1' ; fi
done
diff --git a/bin/start-balancer.sh b/bin/start-balancer.sh
index dfae171..b6b3aa7 100755
--- a/bin/start-balancer.sh
+++ b/bin/start-balancer.sh
@@ -18,8 +18,8 @@
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
# Start balancer daemon.
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
+"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@
diff --git a/bin/start-dfs.sh b/bin/start-dfs.sh
index eae76df..32dcf86 100755
--- a/bin/start-dfs.sh
+++ b/bin/start-dfs.sh
@@ -25,7 +25,7 @@
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
-. "$bin/hdfs-config.sh"
+. "$bin"/../libexec/hdfs-config.sh
# get arguments
if [ $# -ge 1 ]; then
@@ -47,11 +47,11 @@
#---------------------------------------------------------
# namenodes
-NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -namenodes)
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
echo "Starting namenodes on [$NAMENODES]"
-"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$NAMENODES" \
--script "$bin/hdfs" start namenode $nameStartOpt
@@ -64,7 +64,7 @@
"Attempting to start secure cluster, skipping datanodes. " \
"Run start-secure-dns.sh as root to complete startup."
else
- "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+ "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--script "$bin/hdfs" start datanode $dataStartOpt
fi
@@ -74,7 +74,7 @@
# if there are no secondary namenodes configured it returns
# 0.0.0.0 or empty string
-SECONDARY_NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
SECONDARY_NAMENODES=${SECONDARY_NAMENODES:='0.0.0.0'}
if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
@@ -84,7 +84,7 @@
else
echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
- "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+ "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" start secondarynamenode
diff --git a/bin/start-secure-dns.sh b/bin/start-secure-dns.sh
index 5877043..c4190db 100644
--- a/bin/start-secure-dns.sh
+++ b/bin/start-secure-dns.sh
@@ -22,10 +22,10 @@
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
- "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+ "$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
else
echo $usage
fi
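
Both secure-DN wrappers only act when run as root with HADOOP_SECURE_DN_USER set, so a typical invocation looks like the following (illustrative; the variable is normally exported from hadoop-env.sh):

    # as root
    export HADOOP_SECURE_DN_USER=hdfs
    "$HADOOP_PREFIX"/sbin/start-secure-dns.sh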
diff --git a/bin/stop-balancer.sh b/bin/stop-balancer.sh
index 3abb903..7edd0bd 100755
--- a/bin/stop-balancer.sh
+++ b/bin/stop-balancer.sh
@@ -18,9 +18,9 @@
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
# Stop balancer daemon.
# Run this on the machine where the balancer is running
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
+"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer
diff --git a/bin/stop-dfs.sh b/bin/stop-dfs.sh
index ec3ca74..7158ca6 100755
--- a/bin/stop-dfs.sh
+++ b/bin/stop-dfs.sh
@@ -18,16 +18,16 @@
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
#---------------------------------------------------------
# namenodes
-NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -namenodes)
+NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -namenodes)
echo "Stopping namenodes on [$NAMENODES]"
-"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$NAMENODES" \
--script "$bin/hdfs" stop namenode
@@ -40,7 +40,7 @@
"Attempting to stop secure cluster, skipping datanodes. " \
"Run stop-secure-dns.sh as root to complete shutdown."
else
- "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+ "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--script "$bin/hdfs" stop datanode
fi
@@ -50,7 +50,7 @@
# if there are no secondary namenodes configured it returns
# 0.0.0.0 or empty string
-SECONDARY_NAMENODES=$($HADOOP_HDFS_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
@@ -60,7 +60,7 @@
else
echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
- "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+ "$HADOOP_PREFIX/bin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" stop secondarynamenode
diff --git a/bin/stop-secure-dns.sh b/bin/stop-secure-dns.sh
index 3a60ec8..63854c4 100644
--- a/bin/stop-secure-dns.sh
+++ b/bin/stop-secure-dns.sh
@@ -22,10 +22,10 @@
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
-. "$bin"/hdfs-config.sh
+. "$bin"/../libexec/hdfs-config.sh
if [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
- "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+ "$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
else
echo $usage
fi
diff --git a/build.xml b/build.xml
index 847402c..a9f7635 100644
--- a/build.xml
+++ b/build.xml
@@ -25,15 +25,18 @@
<!-- to contribute (without having to type -D or edit this file -->
<property file="${user.home}/build.properties" />
<property file="${basedir}/build.properties" />
-
+
+ <property name="module" value="hdfs"/>
<property name="Name" value="Hadoop-Hdfs"/>
- <property name="name" value="hadoop-hdfs"/>
+ <property name="name" value="hadoop-${module}"/>
<!-- ATTN: Need to change aop.xml's project.version prop. synchronously -->
- <property name="version" value="0.23.0-SNAPSHOT"/>
+ <property name="_version" value="0.23.0"/>
+ <property name="version" value="${_version}-SNAPSHOT"/>
<property name="final.name" value="${name}-${version}"/>
<property name="test.hdfs.final.name" value="${name}-test-${version}"/>
<property name="ant.final.name" value="${name}-ant-${version}"/>
<property name="year" value="2009"/>
+ <property name="package.release" value="1"/>
<property name="src.dir" value="${basedir}/src"/>
<property name="java.src.dir" value="${src.dir}/java"/>
@@ -212,6 +215,17 @@
<equals arg1="${repo}" arg2="staging"/>
</condition>
+ <!-- packaging properties -->
+ <property name="package.prefix" value="/usr"/>
+ <property name="package.conf.dir" value="/etc/hadoop"/>
+ <property name="package.log.dir" value="/var/log/hadoop/hdfs"/>
+ <property name="package.pid.dir" value="/var/run/hadoop"/>
+ <property name="package.var.dir" value="/var/lib/hadoop"/>
+ <property name="package.share.dir" value="share/hadoop/${module}"/>
+ <!-- Use fixed path to build rpm for avoiding rpmbuild conflict with dash path names -->
+ <property name="package.buildroot" value="/tmp/hadoop_package_hdfs_build_${user.name}"/>
+ <property name="package.build.dir" value="/tmp/hadoop_package_hdfs_build_${user.name}/BUILD"/>
+
<!-- the normal classpath -->
<path id="classpath">
<pathelement location="${build.classes}"/>
@@ -1086,16 +1100,20 @@
description="assembles artifacts for binary target">
<mkdir dir="${dist.dir}"/>
<mkdir dir="${dist.dir}/lib"/>
- <mkdir dir="${dist.dir}/contrib"/>
- <mkdir dir="${dist.dir}/bin"/>
+ <mkdir dir="${dist.dir}/${package.share.dir}/contrib"/>
+ <mkdir dir="${dist.dir}/${package.share.dir}/lib"/>
+ <mkdir dir="${dist.dir}/${package.share.dir}/templates"/>
+ <mkdir dir="${dist.dir}/bin"/>
+ <mkdir dir="${dist.dir}/libexec"/>
+ <mkdir dir="${dist.dir}/sbin"/>
- <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
+ <copy todir="${dist.dir}/${package.share.dir}/lib" includeEmptyDirs="false" flatten="true">
<fileset dir="${hdfs.ivy.lib.dir}"/>
</copy>
<copy todir="${dist.dir}/lib" includeEmptyDirs="false">
- <fileset dir="lib">
- <exclude name="**/native/**"/>
+ <fileset dir="${build.dir}/c++/${build.platform}/lib">
+ <include name="**"/>
</fileset>
</copy>
@@ -1103,42 +1121,62 @@
<!--Pass down the version in case its needed again and the target
distribution directory so contribs know where to install to.-->
<property name="version" value="${version}"/>
- <property name="dist.dir" value="${dist.dir}"/>
+ <property name="dist.dir" value="${dist.dir}/${package.share.dir}"/>
<fileset file="${contrib.dir}/build.xml"/>
- </subant>
+ </subant>
- <copy todir="${dist.dir}/webapps">
- <fileset dir="${build.webapps}"/>
- </copy>
-
- <copy todir="${dist.dir}">
+ <copy todir="${dist.dir}/${package.share.dir}">
<fileset file="${build.dir}/${name}-*.jar"/>
</copy>
<copy todir="${dist.dir}/bin">
- <fileset dir="bin"/>
+ <fileset dir="bin">
+ <include name="hdfs"/>
+ </fileset>
</copy>
+
+ <copy todir="${dist.dir}/libexec">
+ <fileset dir="bin">
+ <include name="hdfs-config.sh"/>
+ </fileset>
+ </copy>
+
+ <copy todir="${dist.dir}/sbin">
+ <fileset dir="bin">
+ <include name="start-*.sh"/>
+ <include name="stop-*.sh"/>
+ </fileset>
+ </copy>
+
+ <copy file="${basedir}/src/packages/rpm/init.d/hadoop-namenode" tofile="${dist.dir}/sbin/hadoop-namenode.redhat"/>
+ <copy file="${basedir}/src/packages/rpm/init.d/hadoop-datanode" tofile="${dist.dir}/sbin/hadoop-datanode.redhat"/>
+ <copy file="${basedir}/src/packages/deb/init.d/hadoop-namenode" tofile="${dist.dir}/sbin/hadoop-namenode.debian"/>
+ <copy file="${basedir}/src/packages/deb/init.d/hadoop-datanode" tofile="${dist.dir}/sbin/hadoop-datanode.debian"/>
+
+ <copy file="${basedir}/src/packages/update-hdfs-env.sh" tofile="${dist.dir}/sbin/update-hdfs-env.sh"/>
- <copy todir="${dist.dir}/conf">
+ <copy todir="${dist.dir}/etc/hadoop">
<fileset dir="${conf.dir}" excludes="**/*.template"/>
+ <fileset dir="${basedir}/src/packages/templates/conf" includes="*.template"/>
</copy>
- <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/>
-
- <copy todir="${dist.dir}/ivy">
- <fileset dir="ivy"/>
+ <copy todir="${dist.dir}/${package.share.dir}/templates">
+ <fileset dir="${basedir}/src/packages/templates/conf" includes="*"/>
</copy>
- <copy todir="${dist.dir}">
+ <copy todir="${dist.dir}/${package.share.dir}/webapps">
+ <fileset dir="${build.webapps}"/>
+ </copy>
+
+ <copy todir="${dist.dir}/share/doc/hadoop/${module}">
<fileset dir=".">
<include name="*.txt" />
</fileset>
</copy>
- <copy todir="${dist.dir}/" file="build.xml"/>
-
<chmod perm="ugo+x" type="file" parallel="false">
<fileset dir="${dist.dir}/bin"/>
+ <fileset dir="${dist.dir}/sbin"/>
</chmod>
</target>
@@ -1167,12 +1205,15 @@
<param.listofitems>
<tarfileset dir="${system-test-build-dir}" mode="664">
<exclude name="${final.name}/bin/*" />
+ <exclude name="${final.name}/libexec/*" />
<exclude name="${final.name}/src/**" />
<exclude name="${final.name}/docs/**" />
<include name="${final.name}/**" />
</tarfileset>
<tarfileset dir="${build.dir}" mode="755">
<include name="${final.name}/bin/*" />
+ <include name="${final.name}/libexec/*" />
+ <include name="${final.name}/sbin/*" />
</tarfileset>
</param.listofitems>
</macro_tar>
@@ -1183,17 +1224,129 @@
<param.listofitems>
<tarfileset dir="${build.dir}" mode="664">
<exclude name="${final.name}/bin/*" />
+ <exclude name="${final.name}/libexec/*" />
+ <exclude name="${final.name}/sbin/*" />
<exclude name="${final.name}/src/**" />
<exclude name="${final.name}/docs/**" />
<include name="${final.name}/**" />
</tarfileset>
<tarfileset dir="${build.dir}" mode="755">
<include name="${final.name}/bin/*" />
+ <include name="${final.name}/libexec/*" />
+ <include name="${final.name}/sbin/*" />
</tarfileset>
</param.listofitems>
</macro_tar>
</target>
+ <target name="rpm" depends="binary" description="Make rpm package">
+ <mkdir dir="${package.buildroot}/BUILD" />
+ <mkdir dir="${package.buildroot}/RPMS" />
+ <mkdir dir="${package.buildroot}/SRPMS" />
+ <mkdir dir="${package.buildroot}/SOURCES" />
+ <mkdir dir="${package.buildroot}/SPECS" />
+ <copy todir="${package.buildroot}/SOURCES">
+ <fileset dir="${build.dir}">
+ <include name="${final.name}-bin.tar.gz" />
+ </fileset>
+ </copy>
+ <copy file="${src.dir}/packages/rpm/spec/hadoop-hdfs.spec" todir="${package.buildroot}/SPECS">
+ <filterchain>
+ <replacetokens>
+ <token key="final.name" value="${final.name}" />
+ <token key="version" value="${_version}" />
+ <token key="package.release" value="${package.release}" />
+ <token key="package.build.dir" value="${package.build.dir}" />
+ <token key="package.prefix" value="${package.prefix}" />
+ <token key="package.conf.dir" value="${package.conf.dir}" />
+ <token key="package.log.dir" value="${package.log.dir}" />
+ <token key="package.pid.dir" value="${package.pid.dir}" />
+ <token key="package.var.dir" value="${package.var.dir}" />
+ </replacetokens>
+ </filterchain>
+ </copy>
+ <rpm specFile="hadoop-hdfs.spec" command="-bb --target ${os.arch}" topDir="${package.buildroot}" cleanBuildDir="true" failOnError="true"/>
+ <copy todir="${build.dir}/" flatten="true">
+ <fileset dir="${package.buildroot}/RPMS">
+ <include name="**/*.rpm" />
+ </fileset>
+ </copy>
+ <delete dir="${package.buildroot}" quiet="true" verbose="false"/>
+ </target>
+
+ <target name="deb" depends="ivy-retrieve-package, binary" description="Make deb package">
+ <taskdef name="deb"
+ classname="org.vafer.jdeb.ant.DebAntTask">
+ <classpath refid="ivy-package.classpath" />
+ </taskdef>
+
+ <mkdir dir="${package.build.dir}/hadoop.control" />
+ <mkdir dir="${package.buildroot}/${package.prefix}" />
+ <copy todir="${package.buildroot}/${package.prefix}">
+ <fileset dir="${build.dir}/${final.name}">
+ <include name="**" />
+ </fileset>
+ </copy>
+ <copy todir="${package.build.dir}/hadoop.control">
+ <fileset dir="${src.dir}/packages/deb/hadoop.control">
+ <exclude name="control" />
+ </fileset>
+ </copy>
+ <copy file="${src.dir}/packages/deb/hadoop.control/control" todir="${package.build.dir}/hadoop.control">
+ <filterchain>
+ <replacetokens>
+ <token key="final.name" value="${final.name}" />
+ <token key="version" value="${_version}" />
+ <token key="package.release" value="${package.release}" />
+ <token key="package.build.dir" value="${package.build.dir}" />
+ <token key="package.prefix" value="${package.prefix}" />
+ <token key="package.conf.dir" value="${package.conf.dir}" />
+ <token key="package.log.dir" value="${package.log.dir}" />
+ <token key="package.pid.dir" value="${package.pid.dir}" />
+ </replacetokens>
+ </filterchain>
+ </copy>
+ <deb destfile="${package.buildroot}/${name}_${_version}-${package.release}_${os.arch}.deb" control="${package.build.dir}/hadoop.control">
+ <tarfileset dir="${build.dir}/${final.name}" filemode="644" prefix="${package.prefix}">
+ <exclude name="bin/*" />
+ <exclude name="${package.share.dir}/contrib/*/bin/*" />
+ <exclude name="etc" />
+ <exclude name="etc/**" />
+ <exclude name="libexec/*" />
+ <exclude name="sbin/*" />
+ <include name="**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}/${final.name}" filemode="755" prefix="${package.prefix}">
+ <include name="bin/*" />
+ <exclude name="sbin/*.redhat" />
+ <exclude name="sbin/*.debian" />
+ <include name="sbin/*" />
+ <include name="libexec/*" />
+ <include name="${package.share.dir}/contrib/*/bin/*" />
+ </tarfileset>
+ <tarfileset dir="${src.dir}/packages" filemode="755" prefix="${package.prefix}/sbin">
+ <include name="*.sh" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}/${final.name}/etc/hadoop" filemode="644" prefix="${package.conf.dir}">
+ <include name="**" />
+ <exclude name="configuration.xsl" />
+ <exclude name="hadoop-metrics2.properties" />
+ <exclude name="core-site.xml" />
+ <exclude name="hdfs-site.xml" />
+ <exclude name="mapred-site.xml" />
+ </tarfileset>
+ <tarfileset dir="${basedir}/src/packages/deb/init.d" filemode="755" prefix="/etc/init.d">
+ <include name="**" />
+ </tarfileset>
+ </deb>
+ <copy todir="${build.dir}/" flatten="true">
+ <fileset dir="${package.buildroot}">
+ <include name="**/${name}*.deb" />
+ </fileset>
+ </copy>
+ <delete dir="${package.buildroot}" quiet="true" verbose="false"/>
+ </target>
+
<!-- ================================================================== -->
<!-- Perform audit activities for the release -->
<!-- ================================================================== -->
@@ -1293,7 +1446,7 @@
<env key="OS_ARCH" value="${os.arch}"/>
<env key="JVM_ARCH" value="${jvm.arch}"/>
<env key="LIBHDFS_BUILD_DIR" value="${build.c++.libhdfs}"/>
- <env key="HADOOP_HOME" value="${basedir}"/>
+ <env key="HADOOP_PREFIX" value="${basedir}"/>
<env key="HADOOP_CONF_DIR" value="${test.libhdfs.dir}/conf"/>
<env key="HADOOP_LOG_DIR" value="${test.libhdfs.dir}/logs"/>
<env key="LIBHDFS_TEST_DIR" value="${test.libhdfs.dir}"/>
@@ -1776,11 +1929,21 @@
log="${ivyresolvelog}"/>
</target>
+ <target name="ivy-resolve-compile" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="compile"
+ log="${ivyresolvelog}"/>
+ </target>
+
<target name="ivy-resolve-common" depends="ivy-init">
<ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common"
log="${ivyresolvelog}"/>
</target>
+ <target name="ivy-resolve-package" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="package"
+ log="${ivyresolvelog}"/>
+ </target>
+
<target name="ivy-resolve-hdfs" depends="ivy-init">
<ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="hdfs"
log="${ivyresolvelog}"/>
@@ -1840,14 +2003,30 @@
<ivy:cachepath pathid="ivy-test.classpath" conf="test"/>
</target>
- <target name="ivy-retrieve-common" depends="ivy-resolve-common"
+ <target name="ivy-retrieve-compile" depends="ivy-resolve-compile"
description="Retrieve Ivy-managed artifacts for the compile configurations">
<ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
log="${ivyretrievelog}"/>
+ <ivy:cachepath pathid="ivy-compile.classpath" conf="compile"/>
+ </target>
+
+ <target name="ivy-retrieve-common" depends="ivy-resolve-common"
+ description="Retrieve Ivy-managed artifacts for the runtime configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
+ log="${ivyretrievelog}"/>
<ivy:cachepath pathid="ivy-common.classpath" conf="common"/>
</target>
+ <target name="ivy-retrieve-package" depends="ivy-resolve-package"
+ description="Retrieve Ivy-managed artifacts for the package configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"
+ log="${ivyretrievelog}"/>
+ <ivy:cachepath pathid="ivy-package.classpath" conf="package"/>
+ </target>
+
<target name="ivy-retrieve-hdfs" depends="ivy-resolve-hdfs"
description="Retrieve Ivy-managed artifacts for the hdfs configurations">
<ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
diff --git a/ivy.xml b/ivy.xml
index d4c72e1..8c3cc14 100644
--- a/ivy.xml
+++ b/ivy.xml
@@ -40,6 +40,7 @@
<conf name="hdfs" visibility="private" extends="compile,runtime" description="HDFS dependent artifacts"/>
<conf name="javadoc" visibility="private" description="artiracts required while performing doc generation" extends="common"/>
<conf name="test" extends="master" visibility="private" description="the classpath needed to run tests"/>
+ <conf name="package" extends="master" description="the classpath needed for packaging"/>
<conf name="system" extends="test" visibility="private" description="the classpath needed to run system tests"/>
<conf name="test-hdfswithmr" extends="test, common" visibility="private" description="the classpath needed to run tests"/>
@@ -63,16 +64,16 @@
<dependency org="org.apache.hadoop" name="hadoop-common-instrumented" rev="${hadoop-common.version}" conf="system->default">
<exclude module="ant"/>
</dependency>
- <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->master"/>
+ <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="compile->master"/>
<dependency org="commons-daemon" name="commons-daemon" rev="${commons-daemon.version}" conf="hdfs->default" />
<dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
- <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}" conf="common->default">
+ <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}" conf="compile->master">
<exclude module="ant"/>
<exclude module="jetty"/>
<exclude module="slf4j-simple"/>
</dependency>
- <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}" conf="common->default"/>
- <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}" conf="common->default"/>
+ <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}" conf="compile->master"/>
+ <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}" conf="compile->master"/>
<dependency org="junit" name="junit" rev="${junit.version}" conf="test->master"/>
@@ -93,6 +94,7 @@
<dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}" conf="test->master"/>
+ <dependency org="org.vafer" name="jdeb" rev="${jdeb.version}" conf="package->master"/>
</dependencies>
</ivy-module>
diff --git a/ivy/libraries.properties b/ivy/libraries.properties
index ee60711..00e8331 100644
--- a/ivy/libraries.properties
+++ b/ivy/libraries.properties
@@ -42,6 +42,7 @@
ivy.version=2.1.0
jasper.version=5.5.12
+jdeb.version=0.8
jsp.version=2.1
jsp-api.version=5.5.12
jetty.version=6.1.14
diff --git a/src/c++/libhdfs/tests/test-libhdfs.sh b/src/c++/libhdfs/tests/test-libhdfs.sh
index b22eb94..82afe71 100755
--- a/src/c++/libhdfs/tests/test-libhdfs.sh
+++ b/src/c++/libhdfs/tests/test-libhdfs.sh
@@ -19,7 +19,7 @@
#
# Note: This script depends on 8 environment variables to function correctly:
# a) CLASSPATH
-# b) HADOOP_HOME
+# b) HADOOP_PREFIX
# c) HADOOP_CONF_DIR
# d) HADOOP_LOG_DIR
# e) LIBHDFS_BUILD_DIR
@@ -30,10 +30,10 @@
#
HDFS_TEST=hdfs_test
-HADOOP_LIB_DIR=$HADOOP_HOME/lib
-HADOOP_BIN_DIR=$HADOOP_HOME/bin
+HADOOP_LIB_DIR=$HADOOP_PREFIX/lib
+HADOOP_BIN_DIR=$HADOOP_PREFIX/bin
-COMMON_BUILD_DIR=$HADOOP_HOME/build/ivy/lib/Hadoop-Hdfs/common
+COMMON_BUILD_DIR=$HADOOP_PREFIX/build/ivy/lib/Hadoop-Hdfs/common
COMMON_JAR=$COMMON_BUILD_DIR/hadoop-common-0.22.0-SNAPSHOT.jar
cat > $HADOOP_CONF_DIR/core-site.xml <<EOF
@@ -77,9 +77,9 @@
# If we are running from the hdfs repo we need to make sure
# HADOOP_BIN_DIR contains the common scripts.
# If the bin directory does not and we've got a common jar extract its
-# bin directory to HADOOP_HOME/bin. The bin scripts hdfs-config.sh and
+# bin directory to HADOOP_PREFIX/bin. The bin scripts hdfs-config.sh and
# hadoop-config.sh assume the bin directory is named "bin" and that it
-# is located in HADOOP_HOME.
+# is located in HADOOP_PREFIX.
unpacked_common_bin_dir=0
if [ ! -f $HADOOP_BIN_DIR/hadoop-config.sh ]; then
if [ -f $COMMON_JAR ]; then
@@ -91,7 +91,7 @@
# Manipulate HADOOP_CONF_DIR too
# which is necessary to circumvent bin/hadoop
-HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_HOME/conf
+HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_PREFIX/conf
# set pid file dir so they are not written to /tmp
export HADOOP_PID_DIR=$HADOOP_LOG_DIR
@@ -101,14 +101,14 @@
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
# for developers, add Hadoop classes to CLASSPATH
-if [ -d "$HADOOP_HOME/build/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
+if [ -d "$HADOOP_PREFIX/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/classes
fi
-if [ -d "$HADOOP_HOME/build/webapps" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
+if [ -d "$HADOOP_PREFIX/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build
fi
-if [ -d "$HADOOP_HOME/build/test/classes" ]; then
- CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
+if [ -d "$HADOOP_PREFIX/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_PREFIX/build/test/classes
fi
# add Clover jar file needed for code coverage runs
@@ -118,14 +118,14 @@
IFS=
# add libs to CLASSPATH
-for f in $HADOOP_HOME/lib/*.jar; do
+for f in $HADOOP_PREFIX/lib/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
-for f in $HADOOP_HOME/*.jar; do
+for f in $HADOOP_PREFIX/*.jar; do
CLASSPATH=${CLASSPATH}:$f
done
-for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
+for f in $HADOOP_PREFIX/lib/jsp-2.1/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
@@ -176,7 +176,7 @@
echo "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# Put delays to ensure hdfs is up and running and also shuts down
# after the tests are complete
-cd $HADOOP_HOME
+cd $HADOOP_PREFIX
echo Y | $HADOOP_BIN_DIR/hdfs namenode -format &&
$HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start namenode && sleep 2
$HADOOP_BIN_DIR/hadoop-daemon.sh --script $HADOOP_BIN_DIR/hdfs start datanode && sleep 2
diff --git a/src/contrib/fuse-dfs/README b/src/contrib/fuse-dfs/README
index 849f4f7..1744892 100644
--- a/src/contrib/fuse-dfs/README
+++ b/src/contrib/fuse-dfs/README
@@ -33,9 +33,9 @@
BUILDING
- 1. in HADOOP_HOME: `ant compile-libhdfs -Dlibhdfs=1
- 2. in HADOOP_HOME: `ant package` to deploy libhdfs
- 3. in HADOOP_HOME: `ant compile-contrib -Dlibhdfs=1 -Dfusedfs=1`
+ 1. in HADOOP_PREFIX: `ant compile-libhdfs -Dlibhdfs=1`
+ 2. in HADOOP_PREFIX: `ant package` to deploy libhdfs
+ 3. in HADOOP_PREFIX: `ant compile-contrib -Dlibhdfs=1 -Dfusedfs=1`
NOTE: for amd64 architecture, libhdfs will not compile unless you edit
the Makefile in src/c++/libhdfs/Makefile and set OS_ARCH=amd64
@@ -111,7 +111,7 @@
RECOMMENDATIONS
-1. From /bin, `ln -s $HADOOP_HOME/contrib/fuse-dfs/fuse_dfs* .`
+1. From /bin, `ln -s $HADOOP_PREFIX/contrib/fuse-dfs/fuse_dfs* .`
2. Always start with debug on so you can see if you are missing a classpath or something like that.
diff --git a/src/contrib/fuse-dfs/build.xml b/src/contrib/fuse-dfs/build.xml
index 53754a9..516e167 100644
--- a/src/contrib/fuse-dfs/build.xml
+++ b/src/contrib/fuse-dfs/build.xml
@@ -46,7 +46,7 @@
<exec executable="make" failonerror="true">
<env key="OS_NAME" value="${os.name}"/>
<env key="OS_ARCH" value="${os.arch}"/>
- <env key="HADOOP_HOME" value="${hadoop.root}"/>
+ <env key="HADOOP_PREFIX" value="${hadoop.root}"/>
<env key="PACKAGE_VERSION" value="0.1.0"/>
<env key="BUILD_PLATFORM" value="${build.platform}" />
<env key="PERMS" value="${perms}"/>
diff --git a/src/contrib/fuse-dfs/src/Makefile.am b/src/contrib/fuse-dfs/src/Makefile.am
index 8e66d44..d62384d 100644
--- a/src/contrib/fuse-dfs/src/Makefile.am
+++ b/src/contrib/fuse-dfs/src/Makefile.am
@@ -17,5 +17,5 @@
bin_PROGRAMS = fuse_dfs
fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c fuse_impls_chown.c fuse_impls_create.c fuse_impls_flush.c fuse_impls_getattr.c fuse_impls_mkdir.c fuse_impls_mknod.c fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c fuse_impls_unlink.c fuse_impls_write.c
AM_CFLAGS= -Wall -g
-AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_HOME)/src/c++/libhdfs -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
-AM_LDFLAGS= -L$(HADOOP_HOME)/build/c++/$(BUILD_PLATFORM)/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm
+AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/src/c++/libhdfs -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
+AM_LDFLAGS= -L$(HADOOP_PREFIX)/build/c++/$(BUILD_PLATFORM)/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm
diff --git a/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh b/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
index cf0fbcb..cf8fa68 100755
--- a/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
+++ b/src/contrib/fuse-dfs/src/fuse_dfs_wrapper.sh
@@ -16,8 +16,8 @@
# limitations under the License.
#
-if [ "$HADOOP_HOME" = "" ]; then
-export HADOOP_HOME=/usr/local/share/hadoop
+if [ "$HADOOP_PREFIX" = "" ]; then
+export HADOOP_PREFIX=/usr/local/share/hadoop
fi
if [ "$OS_ARCH" = "" ]; then
@@ -33,16 +33,16 @@
fi
# If dev build set paths accordingly
-if [ -d $HADOOP_HDFS_HOME/build ]; then
- export HADOOP_HOME=$HADOOP_HDFS_HOME
- for f in ${HADOOP_HOME}/build/*.jar ; do
+if [ -d $HADOOP_PREFIX/build ]; then
+ export HADOOP_PREFIX=$HADOOP_PREFIX
+ for f in ${HADOOP_PREFIX}/build/*.jar ; do
export CLASSPATH=$CLASSPATH:$f
done
- for f in $HADOOP_HOME/build/ivy/lib/Hadoop-Hdfs/common/*.jar ; do
+ for f in $HADOOP_PREFIX/build/ivy/lib/Hadoop-Hdfs/common/*.jar ; do
export CLASSPATH=$CLASSPATH:$f
done
- export PATH=$HADOOP_HOME/build/contrib/fuse-dfs:$PATH
- export LD_LIBRARY_PATH=$HADOOP_HOME/build/c++/lib:$JAVA_HOME/jre/lib/$OS_ARCH/server
+ export PATH=$HADOOP_PREFIX/build/contrib/fuse-dfs:$PATH
+ export LD_LIBRARY_PATH=$HADOOP_PREFIX/build/c++/lib:$JAVA_HOME/jre/lib/$OS_ARCH/server
fi
fuse_dfs $@
diff --git a/src/contrib/hdfsproxy/README b/src/contrib/hdfsproxy/README
index 495a1be..abf0dc6 100644
--- a/src/contrib/hdfsproxy/README
+++ b/src/contrib/hdfsproxy/README
@@ -38,10 +38,10 @@
> Standard HTTPS Get Support for file transfer
The detailed configuration/set-up guide is in the Forrest
-documentation, which can be found at $HADOOP_HDFS_HOME/docs. In order to build the
+documentation, which can be found at $HADOOP_PREFIX/docs. In order to build the
documentation on your own from source please use the following command in
the downloaded source folder:
ant docs -Dforrest.home=path to forrest -Djava5.home= path to jdk5.
-The documentation so built would be under $HADOOP_HDFS_HOME/build/docs
+The documentation so built would be under $HADOOP_PREFIX/build/docs
diff --git a/src/docs/src/documentation/content/xdocs/hdfsproxy.xml b/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
index 60bde9c..7d52880 100644
--- a/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
+++ b/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
@@ -272,11 +272,11 @@
</section>
<section>
<title> Build Process </title>
- <p>Under <code>$HADOOP_HDFS_HOME</code> do the following <br/>
+ <p>Under <code>$HADOOP_PREFIX</code> do the following <br/>
<code> $ ant clean tar</code> <br/>
<code> $ cd src/contrib/hdfsproxy/</code> <br/>
<code> $ ant clean tar</code> <br/>
- The <code>hdfsproxy-*.tar.gz</code> file will be generated under <code>$HADOOP_HDFS_HOME/build/contrib/hdfsproxy/</code>. Use this tar ball to proceed for the server start-up/shutdown process after necessary configuration.
+ The <code>hdfsproxy-*.tar.gz</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Use this tar ball to proceed for the server start-up/shutdown process after necessary configuration.
</p>
</section>
<section>
@@ -494,22 +494,22 @@
<title> Build and Deployment Process </title>
<section>
<title> Build forwarding war (ROOT.war) </title>
- <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-root-conf dir. Under <code>$HADOOP_HDFS_HOME</code> do the following <br/>
+ <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-root-conf dir. Under <code>$HADOOP_PREFIX</code> do the following <br/>
<code> $ export HDFSPROXY_CONF_DIR=${user.home}/proxy-root-conf</code> <br/>
<code> $ ant clean tar</code> <br/>
<code> $ cd src/contrib/hdfsproxy/</code> <br/>
<code> $ ant clean forward</code> <br/>
- The <code>hdfsproxy-forward-*.war</code> file will be generated under <code>$HADOOP_HDFS_HOME/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it at ROOT.war (if ROOT dir already exists, remove it first) for deployment.
+ The <code>hdfsproxy-forward-*.war</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it at ROOT.war (if ROOT dir already exists, remove it first) for deployment.
</p>
</section>
<section>
<title> Build cluster client war (client.war) </title>
- <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-client-conf dir. Under <code>$HADOOP_HDFS_HOME</code> do the following <br/>
+ <p>Suppose hdfsproxy-default.xml has been properly configured and it is under ${user.home}/proxy-client-conf dir. Under <code>$HADOOP_PREFIX</code> do the following <br/>
<code> $ export HDFSPROXY_CONF_DIR=${user.home}/proxy-client-conf</code> <br/>
<code> $ ant clean tar</code> <br/>
<code> $ cd src/contrib/hdfsproxy/</code> <br/>
<code> $ ant clean war</code> <br/>
- The <code>hdfsproxy-*.war</code> file will be generated under <code>$HADOOP_HDFS_HOME/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it properly for deployment.
+ The <code>hdfsproxy-*.war</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Copy this war file to tomcat's webapps directory and rename it properly for deployment.
</p>
</section>
<section>
diff --git a/src/docs/src/documentation/content/xdocs/libhdfs.xml b/src/docs/src/documentation/content/xdocs/libhdfs.xml
index 6cb9451..44ab6c9 100644
--- a/src/docs/src/documentation/content/xdocs/libhdfs.xml
+++ b/src/docs/src/documentation/content/xdocs/libhdfs.xml
@@ -34,7 +34,7 @@
libhdfs is a JNI based C API for Hadoop's Distributed File System (HDFS).
It provides C APIs to a subset of the HDFS APIs to manipulate HDFS files and
the filesystem. libhdfs is part of the Hadoop distribution and comes
-pre-compiled in ${HADOOP_HOME}/libhdfs/libhdfs.so .
+pre-compiled in ${HADOOP_PREFIX}/libhdfs/libhdfs.so .
</p>
</section>
@@ -45,7 +45,7 @@
The libhdfs APIs are a subset of: <a href="api/org/apache/hadoop/fs/FileSystem.html" >hadoop fs APIs</a>.
</p>
<p>
-The header file for libhdfs describes each API in detail and is available in ${HADOOP_HOME}/src/c++/libhdfs/hdfs.h
+The header file for libhdfs describes each API in detail and is available in ${HADOOP_PREFIX}/src/c++/libhdfs/hdfs.h
</p>
</section>
<section>
@@ -77,8 +77,8 @@
<section>
<title>How To Link With The Library</title>
<p>
-See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_HOME}/src/c++/libhdfs/Makefile) or something like:<br />
-gcc above_sample.c -I${HADOOP_HOME}/src/c++/libhdfs -L${HADOOP_HOME}/libhdfs -lhdfs -o above_sample
+See the Makefile for hdfs_test.c in the libhdfs source directory (${HADOOP_PREFIX}/src/c++/libhdfs/Makefile) or something like:<br />
+gcc above_sample.c -I${HADOOP_PREFIX}/src/c++/libhdfs -L${HADOOP_PREFIX}/libhdfs -lhdfs -o above_sample
</p>
</section>
<section>
@@ -86,8 +86,8 @@
<p>
The most common problem is the CLASSPATH is not set properly when calling a program that uses libhdfs.
Make sure you set it to all the Hadoop jars needed to run Hadoop itself. Currently, there is no way to
-programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_HOME}
-and ${HADOOP_HOME}/lib as well as the right configuration directory containing hdfs-site.xml
+programmatically generate the classpath, but a good bet is to include all the jar files in ${HADOOP_PREFIX}
+and ${HADOOP_PREFIX}/lib as well as the right configuration directory containing hdfs-site.xml
</p>
</section>
<section>
diff --git a/src/packages/deb/hadoop.control/conffile b/src/packages/deb/hadoop.control/conffile
new file mode 100644
index 0000000..e52c79f
--- /dev/null
+++ b/src/packages/deb/hadoop.control/conffile
@@ -0,0 +1,15 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+/etc/hadoop
diff --git a/src/packages/deb/hadoop.control/control b/src/packages/deb/hadoop.control/control
new file mode 100644
index 0000000..06fa668
--- /dev/null
+++ b/src/packages/deb/hadoop.control/control
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+Package: hadoop-hdfs
+Version: @version@
+Section: misc
+Priority: optional
+Provides: hadoop-hdfs
+Architecture: all
+Depends: openjdk-6-jre-headless, hadoop-common
+Maintainer: Apache Software Foundation <general@hadoop.apache.org>
+Description: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing.
+Distribution: development
diff --git a/src/packages/deb/hadoop.control/postinst b/src/packages/deb/hadoop.control/postinst
new file mode 100644
index 0000000..1fa4374
--- /dev/null
+++ b/src/packages/deb/hadoop.control/postinst
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bash /usr/sbin/update-hdfs-env.sh \
+ --prefix=/usr \
+ --bin-dir=/usr/bin \
+ --sbin-dir=/usr/sbin \
+ --conf-dir=/etc/hadoop \
+ --log-dir=/var/log/hadoop \
+ --pid-dir=/var/run/hadoop
diff --git a/src/packages/deb/hadoop.control/postrm b/src/packages/deb/hadoop.control/postrm
new file mode 100644
index 0000000..0521972
--- /dev/null
+++ b/src/packages/deb/hadoop.control/postrm
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+/usr/sbin/groupdel hadoop 2> /dev/null > /dev/null
+exit 0
diff --git a/src/packages/deb/hadoop.control/preinst b/src/packages/deb/hadoop.control/preinst
new file mode 100644
index 0000000..940b7ad
--- /dev/null
+++ b/src/packages/deb/hadoop.control/preinst
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+/usr/sbin/useradd --comment "Hadoop HDFS" --shell /bin/bash -M -r --groups hadoop --home /var/lib/hadoop/hdfs hdfs 2> /dev/null || :
diff --git a/src/packages/deb/hadoop.control/prerm b/src/packages/deb/hadoop.control/prerm
new file mode 100644
index 0000000..6b02c3d
--- /dev/null
+++ b/src/packages/deb/hadoop.control/prerm
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bash /usr/sbin/update-hdfs-env.sh \
+ --prefix=/usr \
+ --bin-dir=/usr/bin \
+ --sbin-dir=/usr/sbin \
+ --conf-dir=/etc/hadoop \
+ --log-dir=/var/log/hadoop \
+ --pid-dir=/var/run/hadoop \
+ --uninstall
diff --git a/src/packages/deb/init.d/hadoop-datanode b/src/packages/deb/init.d/hadoop-datanode
new file mode 100644
index 0000000..77bc6a5
--- /dev/null
+++ b/src/packages/deb/init.d/hadoop-datanode
@@ -0,0 +1,142 @@
+#! /bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+### BEGIN INIT INFO
+# Provides: hadoop-datanode
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Apache Hadoop Data Node server
+### END INIT INFO
+
+set -e
+
+# /etc/init.d/hadoop-datanode: start and stop the Apache Hadoop Data Node daemon
+
+test -x /usr/bin/hadoop || exit 0
+( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
+
+umask 022
+
+if test -f /etc/default/hadoop-env.sh; then
+ . /etc/default/hadoop-env.sh
+fi
+
+. /lib/lsb/init-functions
+
+# Are we running from init?
+run_by_init() {
+ ([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
+}
+
+check_for_no_start() {
+ # forget it if we're trying to start, and /etc/hadoop/hadoop-datanode_not_to_be_run exists
+ if [ -e /etc/hadoop/hadoop-datanode_not_to_be_run ]; then
+ if [ "$1" = log_end_msg ]; then
+ log_end_msg 0
+ fi
+ if ! run_by_init; then
+ log_action_msg "Apache Hadoop Data Node server not in use (/etc/hadoop/hadoop-datanode_not_to_be_run)"
+ fi
+ exit 0
+ fi
+}
+
+check_privsep_dir() {
+ # Create the PrivSep empty dir if necessary
+ if [ ! -d ${HADOOP_PID_DIR} ]; then
+ mkdir -p ${HADOOP_PID_DIR}
+ chown root:hadoop ${HADOOP_PID_DIR}
+ chmod 0775 ${HADOOP_PID_DIR}
+ fi
+}
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
+
+case "$1" in
+ start)
+ check_privsep_dir
+ check_for_no_start
+ log_daemon_msg "Starting Apache Hadoop Data Node server" "hadoop-datanode"
+ if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping Apache Hadoop Data Node server" "hadoop-datanode"
+ if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+
+ restart)
+ check_privsep_dir
+ log_daemon_msg "Restarting Apache Hadoop Data Node server" "hadoop-datanode"
+ start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid
+ check_for_no_start log_end_msg
+ if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+
+ try-restart)
+ check_privsep_dir
+ log_daemon_msg "Restarting Apache Hadoop Data Node server" "hadoop-datanode"
+ set +e
+ start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid
+ RET="$?"
+ set -e
+ case $RET in
+ 0)
+ # old daemon stopped
+ check_for_no_start log_end_msg
+ if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ 1)
+ # daemon not running
+ log_progress_msg "(not running)"
+ log_end_msg 0
+ ;;
+ *)
+ # failed to stop
+ log_progress_msg "(failed to stop)"
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+
+ status)
+ status_of_proc -p ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid ${JAVA_HOME}/bin/java hadoop-datanode && exit 0 || exit $?
+ ;;
+
+ *)
+ log_action_msg "Usage: /etc/init.d/hadoop-datanode {start|stop|restart|try-restart|status}"
+ exit 1
+esac
+
+exit 0
diff --git a/src/packages/deb/init.d/hadoop-namenode b/src/packages/deb/init.d/hadoop-namenode
new file mode 100644
index 0000000..520177f
--- /dev/null
+++ b/src/packages/deb/init.d/hadoop-namenode
@@ -0,0 +1,154 @@
+#! /bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+### BEGIN INIT INFO
+# Provides: hadoop-namenode
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Apache Hadoop Name Node server
+### END INIT INFO
+
+set -e
+
+# /etc/init.d/hadoop-namenode: start and stop the Apache Hadoop Name Node daemon
+
+test -x /usr/bin/hadoop || exit 0
+( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
+
+umask 022
+
+if test -f /etc/default/hadoop-env.sh; then
+ . /etc/default/hadoop-env.sh
+fi
+
+. /lib/lsb/init-functions
+
+# Are we running from init?
+run_by_init() {
+ ([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
+}
+
+check_for_no_start() {
+ # forget it if we're trying to start, and /etc/hadoop/hadoop-namenode_not_to_be_run exists
+ if [ -e /etc/hadoop/hadoop-namenode_not_to_be_run ]; then
+ if [ "$1" = log_end_msg ]; then
+ log_end_msg 0
+ fi
+ if ! run_by_init; then
+ log_action_msg "Apache Hadoop Name Node server not in use (/etc/hadoop/hadoop-namenode_not_to_be_run)"
+ fi
+ exit 0
+ fi
+}
+
+check_privsep_dir() {
+ # Create the PrivSep empty dir if necessary
+ if [ ! -d ${HADOOP_PID_DIR} ]; then
+ mkdir -p ${HADOOP_PID_DIR}
+ chown root:hadoop ${HADOOP_PID_DIR}
+ chmod 0775 ${HADOOP_PID_DIR}
+ fi
+}
+
+format() {
+ su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} namenode -format' hdfs
+}
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
+
+case "$1" in
+ start)
+ check_privsep_dir
+ check_for_no_start
+ log_daemon_msg "Starting Apache Hadoop Name Node server" "hadoop-namenode"
+ if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ stop)
+ log_daemon_msg "Stopping Apache Hadoop Name Node server" "hadoop-namenode"
+ if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ format)
+ log_daemon_msg "Formatting Apache Hadoop Name Node" "hadoop-namenode"
+ format
+ if [ $? -eq 0 ]; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ restart)
+ check_privsep_dir
+ log_daemon_msg "Restarting Apache Hadoop Name Node server" "hadoop-namenode"
+ start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid
+ check_for_no_start log_end_msg
+ if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+
+ try-restart)
+ check_privsep_dir
+ log_daemon_msg "Restarting Apache Hadoop Name Node server" "hadoop-namenode"
+ set +e
+ start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid
+ RET="$?"
+ set -e
+ case $RET in
+ 0)
+ # old daemon stopped
+ check_for_no_start log_end_msg
+ if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
+ log_end_msg 0
+ else
+ log_end_msg 1
+ fi
+ ;;
+ 1)
+ # daemon not running
+ log_progress_msg "(not running)"
+ log_end_msg 0
+ ;;
+ *)
+ # failed to stop
+ log_progress_msg "(failed to stop)"
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+
+ status)
+ status_of_proc -p ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid ${JAVA_HOME}/bin/java hadoop-namenode && exit 0 || exit $?
+ ;;
+
+ *)
+    log_action_msg "Usage: /etc/init.d/hadoop-namenode {start|stop|restart|try-restart|format|status}"
+ exit 1
+esac
+
+exit 0
diff --git a/src/packages/rpm/init.d/hadoop-datanode b/src/packages/rpm/init.d/hadoop-datanode
new file mode 100644
index 0000000..019051b
--- /dev/null
+++ b/src/packages/rpm/init.d/hadoop-datanode
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Starts a Hadoop datanode
+#
+# chkconfig: 2345 90 10
+# description: Hadoop datanode
+
+source /etc/rc.d/init.d/functions
+source /etc/default/hadoop-env.sh
+
+RETVAL=0
+PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid"
+desc="Hadoop datanode daemon"
+
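+# Start the data node via hadoop-daemon.sh as the hdfs user and record a
+# subsys lock so condrestart can tell whether the service was running.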
+start() {
+ echo -n $"Starting $desc (hadoop-datanode): "
+ daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start datanode
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-datanode
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Stopping $desc (hadoop-datanode): "
+ daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop datanode
+ RETVAL=$?
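+  # Give the daemon a few seconds to shut down before the lock and pid files are removed.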
+ sleep 5
+ echo
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-datanode $PIDFILE
+}
+
+restart() {
+ stop
+ start
+}
+
+checkstatus(){
+ status -p $PIDFILE ${JAVA_HOME}/bin/java
+ RETVAL=$?
+}
+
+condrestart(){
+ [ -e /var/lock/subsys/hadoop-datanode ] && restart || :
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ checkstatus
+ ;;
+ restart)
+ restart
+ ;;
+ condrestart)
+ condrestart
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/src/packages/rpm/init.d/hadoop-namenode b/src/packages/rpm/init.d/hadoop-namenode
new file mode 100644
index 0000000..d019ff2
--- /dev/null
+++ b/src/packages/rpm/init.d/hadoop-namenode
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Starts a Hadoop namenode
+#
+# chkconfig: 2345 90 10
+# description: Hadoop namenode
+
+source /etc/rc.d/init.d/functions
+source /etc/default/hadoop-env.sh
+
+RETVAL=0
+PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid"
+desc="Hadoop namenode daemon"
+
+start() {
+ echo -n $"Starting $desc (hadoop-namenode): "
+ daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start namenode $1
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-namenode
+ return $RETVAL
+}
+
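+# Start the name node with the -upgrade flag to migrate existing metadata
+# after installing a new HDFS version.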
+upgrade() {
+ start -upgrade
+}
+
+stop() {
+ echo -n $"Stopping $desc (hadoop-namenode): "
+ daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop namenode
+ RETVAL=$?
+ sleep 5
+ echo
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-namenode $PIDFILE
+}
+
+checkstatus(){
+ status -p $PIDFILE ${JAVA_HOME}/bin/java
+ RETVAL=$?
+}
+
+restart() {
+ stop
+ start
+}
+
+condrestart(){
+ [ -e /var/lock/subsys/hadoop-namenode ] && restart || :
+}
+
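+# Format the name node storage directories as the hdfs user.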
+format() {
+ daemon --user hdfs ${HADOOP_PREFIX}/bin/hdfs namenode -format
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ upgrade)
+ upgrade
+ ;;
+ format)
+ format
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ checkstatus
+ ;;
+ restart)
+ restart
+ ;;
+ condrestart|try-restart)
+ condrestart
+ ;;
+ *)
+    echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|upgrade|format}"
+ exit 1
+esac
+
+exit $RETVAL
diff --git a/src/packages/rpm/spec/hadoop-hdfs.spec b/src/packages/rpm/spec/hadoop-hdfs.spec
new file mode 100644
index 0000000..00fb9a9
--- /dev/null
+++ b/src/packages/rpm/spec/hadoop-hdfs.spec
@@ -0,0 +1,180 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# RPM Spec file for Hadoop version @version@
+#
+
+%define name hadoop-hdfs
+%define version @version@
+%define release @package.release@
+
+# Installation Locations
+%define _prefix @package.prefix@
+%define _bin_dir %{_prefix}/bin
+%define _conf_dir @package.conf.dir@
+%define _lib_dir %{_prefix}/lib
+%define _lib64_dir %{_prefix}/lib64
+%define _libexec_dir %{_prefix}/libexec
+%define _log_dir @package.log.dir@
+%define _pid_dir @package.pid.dir@
+%define _sbin_dir %{_prefix}/sbin
+%define _share_dir %{_prefix}/share
+%define _var_dir @package.var.dir@
+
+# Build time settings
+%define _build_dir @package.build.dir@
+%define _final_name @final.name@
+%define debug_package %{nil}
+
+# Disable brp-java-repack-jars for AspectJ
+%define __os_install_post \
+ /usr/lib/rpm/redhat/brp-compress \
+ %{!?__debug_package:/usr/lib/rpm/redhat/brp-strip %{__strip}} \
+ /usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} \
+ /usr/lib/rpm/redhat/brp-strip-comment-note %{__strip} %{__objdump} \
+ /usr/lib/rpm/brp-python-bytecompile %{nil}
+
+# RPM searches perl files for dependencies, and this breaks for non-packaged
+# perl libs like thrift, so disable the internal dependency generator.
+%define _use_internal_dependency_generator 0
+
+%ifarch i386
+%global hadoop_arch Linux-i386-32
+%endif
+%ifarch amd64 x86_64
+%global hadoop_arch Linux-amd64-64
+%endif
+%ifarch noarch
+%global hadoop_arch ""
+%endif
+
+Summary: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing
+License: Apache License, Version 2.0
+URL: http://hadoop.apache.org/core/
+Vendor: Apache Software Foundation
+Group: Development/Libraries
+Name: %{name}
+Version: %{version}
+Release: %{release}
+Source0: %{_final_name}-bin.tar.gz
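+# Each Prefix below marks a relocatable path; rpm passes the chosen values to
+# the scriptlets as RPM_INSTALL_PREFIX0 through RPM_INSTALL_PREFIX3.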
+Prefix: %{_prefix}
+Prefix: %{_conf_dir}
+Prefix: %{_log_dir}
+Prefix: %{_pid_dir}
+Buildroot: %{_build_dir}
+Requires: sh-utils, textutils, /usr/sbin/useradd, /usr/sbin/usermod, /sbin/chkconfig, /sbin/service, jdk >= 1.6, hadoop-common >= %{version}
+AutoReqProv: no
+Provides: hadoop-hdfs
+
+%description
+The Apache Hadoop project develops open-source software for reliable, scalable,
+distributed computing. Hadoop includes these subprojects:
+
+HDFS: A distributed file system that provides high-throughput access to application data.
+
+%prep
+%setup -n %{_final_name}
+
+%build
+if [ -d ${RPM_BUILD_DIR}%{_prefix} ]; then
+ rm -rf ${RPM_BUILD_DIR}%{_prefix}
+fi
+
+if [ -d ${RPM_BUILD_DIR}%{_log_dir} ]; then
+ rm -rf ${RPM_BUILD_DIR}%{_log_dir}
+fi
+
+if [ -d ${RPM_BUILD_DIR}%{_conf_dir} ]; then
+ rm -rf ${RPM_BUILD_DIR}%{_conf_dir}
+fi
+
+if [ -d ${RPM_BUILD_DIR}%{_pid_dir} ]; then
+ rm -rf ${RPM_BUILD_DIR}%{_pid_dir}
+fi
+
+mkdir -p ${RPM_BUILD_DIR}%{_prefix}
+mkdir -p ${RPM_BUILD_DIR}%{_bin_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_lib_dir}
+%ifarch amd64 x86_64
+mkdir -p ${RPM_BUILD_DIR}%{_lib64_dir}
+%endif
+mkdir -p ${RPM_BUILD_DIR}%{_libexec_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_log_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_conf_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_pid_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_sbin_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_share_dir}
+mkdir -p ${RPM_BUILD_DIR}%{_var_dir}
+mkdir -p ${RPM_BUILD_DIR}/etc/init.d
+
+cp ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-namenode.redhat ${RPM_BUILD_DIR}/etc/init.d/hadoop-namenode
+cp ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-datanode.redhat ${RPM_BUILD_DIR}/etc/init.d/hadoop-datanode
+rm -f ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-namenode.*
+rm -f ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-datanode.*
+
+chmod 0755 ${RPM_BUILD_DIR}/etc/init.d/hadoop-*
+
+
+#########################
+#### INSTALL SECTION ####
+#########################
+%install
+mv ${RPM_BUILD_DIR}/%{_final_name}/bin/* ${RPM_BUILD_DIR}%{_bin_dir}
+rm ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/configuration.xsl
+rm ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/hadoop-metrics2.properties
+mv ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/* ${RPM_BUILD_DIR}%{_conf_dir}
+mv ${RPM_BUILD_DIR}/%{_final_name}/lib/* ${RPM_BUILD_DIR}%{_lib_dir}
+mv ${RPM_BUILD_DIR}/%{_final_name}/libexec/* ${RPM_BUILD_DIR}%{_libexec_dir}
+mv ${RPM_BUILD_DIR}/%{_final_name}/sbin/* ${RPM_BUILD_DIR}%{_sbin_dir}
+mv ${RPM_BUILD_DIR}/%{_final_name}/share/* ${RPM_BUILD_DIR}%{_share_dir}
+rm -rf ${RPM_BUILD_DIR}/%{_final_name}/etc
+
+%pre
+getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -r hadoop
+/usr/sbin/useradd --comment "Hadoop HDFS" --shell /bin/bash -M -r --groups hadoop --home %{_var_dir}/hdfs hdfs 2> /dev/null || :
+
+%post
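+# Generate the hadoop environment settings from the actual, possibly
+# relocated, installation paths chosen at install time.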
+bash ${RPM_INSTALL_PREFIX0}/sbin/update-hdfs-env.sh \
+ --prefix=${RPM_INSTALL_PREFIX0} \
+ --bin-dir=${RPM_INSTALL_PREFIX0}/bin \
+ --sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
+ --conf-dir=${RPM_INSTALL_PREFIX1} \
+ --log-dir=${RPM_INSTALL_PREFIX2} \
+ --pid-dir=${RPM_INSTALL_PREFIX3}
+
+%preun
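+# Remove the generated environment settings before the package files are deleted.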
+bash ${RPM_INSTALL_PREFIX0}/sbin/update-hdfs-env.sh \
+ --prefix=${RPM_INSTALL_PREFIX0} \
+ --bin-dir=${RPM_INSTALL_PREFIX0}/bin \
+ --sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
+ --conf-dir=${RPM_INSTALL_PREFIX1} \
+ --log-dir=${RPM_INSTALL_PREFIX2} \
+ --pid-dir=${RPM_INSTALL_PREFIX3} \
+ --uninstall
+
+%files
+%defattr(-,root,root)
+%attr(0755,root,hadoop) %{_log_dir}
+%attr(0775,root,hadoop) %{_pid_dir}
+%config(noreplace) %{_conf_dir}/hdfs-site.xml
+%{_prefix}
+%attr(0755,root,root) /etc/init.d/hadoop-namenode
+%attr(0755,root,root) /etc/init.d/hadoop-datanode
diff --git a/src/packages/templates/conf/hdfs-site.xml b/src/packages/templates/conf/hdfs-site.xml
new file mode 100644
index 0000000..f319dfd
--- /dev/null
+++ b/src/packages/templates/conf/hdfs-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
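+<!-- The ${...} tokens are placeholders that the packaging scripts are
+     expected to substitute with site-specific values at setup time. -->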
+
+<configuration>
+ <property>
+ <name>dfs.replication</name>
+ <value>${HADOOP_REPLICATION}</value>
+ </property>
+ <property>
+ <name>dfs.name.dir</name>
+ <value>${HADOOP_NN_DIR}</value>
+ </property>
+ <property>
+ <name>dfs.data.dir</name>
+ <value>${HADOOP_DN_DIR}</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>/tmp</value>
+ </property>
+ <property>
+ <name>dfs.namenode.safemode.threshold-pct</name>
+ <value>1.0f</value>
+ </property>
+ <property>
+ <name>dfs.namenode.safemode.extension</name>
+ <value>3</value>
+ </property>
+</configuration>
+
diff --git a/src/packages/update-hdfs-env.sh b/src/packages/update-hdfs-env.sh
new file mode 100644
index 0000000..648ad05
--- /dev/null
+++ b/src/packages/update-hdfs-env.sh
@@ -0,0 +1,132 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script configures hdfs-env.sh and symlinks directories to support
+# relocated RPM installations.
+
+usage() {
+ echo "
+usage: $0 <parameters>
+ Required parameters:
+ --prefix=PREFIX path to install into
+
+  Optional parameters:
+     --arch=i386                 OS Architecture
+     --bin-dir=PREFIX/bin        Executable directory
+     --conf-dir=/etc/hadoop      Configuration directory
+     --lib-dir=PREFIX/lib        Library directory
+     --log-dir=/var/log/hadoop   Log directory
+     --pid-dir=/var/run          PID file location
+     --sbin-dir=PREFIX/sbin      System executable directory
+     --uninstall                 Remove the settings generated at install time
+ "
+ exit 1
+}
+
+OPTS=$(getopt \
+ -n $0 \
+ -o '' \
+ -l 'arch:' \
+ -l 'prefix:' \
+ -l 'bin-dir:' \
+ -l 'conf-dir:' \
+ -l 'lib-dir:' \
+ -l 'log-dir:' \
+ -l 'pid-dir:' \
+ -l 'sbin-dir:' \
+ -l 'uninstall' \
+ -- "$@")
+
+if [ $? != 0 ] ; then
+ usage
+fi
+
+eval set -- "${OPTS}"
+while true ; do
+ case "$1" in
+ --arch)
+ ARCH=$2 ; shift 2
+ ;;
+ --prefix)
+ PREFIX=$2 ; shift 2
+ ;;
+ --bin-dir)
+ BIN_DIR=$2 ; shift 2
+ ;;
+ --log-dir)
+ LOG_DIR=$2 ; shift 2
+ ;;
+ --lib-dir)
+ LIB_DIR=$2 ; shift 2
+ ;;
+ --conf-dir)
+ CONF_DIR=$2 ; shift 2
+ ;;
+ --pid-dir)
+ PID_DIR=$2 ; shift 2
+ ;;
+ --sbin-dir)
+ SBIN_DIR=$2 ; shift 2
+ ;;
+ --uninstall)
+ UNINSTALL=1; shift
+ ;;
+ --)
+ shift ; break
+ ;;
+ *)
+ echo "Unknown option: $1"
+ usage
+ exit 1
+ ;;
+ esac
+done
+
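+# All parameters listed here are required; print usage and exit if any is missing.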
+for var in PREFIX; do
+ if [ -z "$(eval "echo \$$var")" ]; then
+ echo Missing param: $var
+ usage
+ fi
+done
+
+ARCH=${ARCH:-i386}
+BIN_DIR=${BIN_DIR:-$PREFIX/bin}
+CONF_DIR=${CONF_DIR:-$PREFIX/etc/hadoop}
+LIB_DIR=${LIB_DIR:-$PREFIX/lib}
+LOG_DIR=${LOG_DIR:-$PREFIX/var/log}
+PID_DIR=${PID_DIR:-$PREFIX/var/run}
+SBIN_DIR=${SBIN_DIR:-$PREFIX/sbin}
+UNINSTALL=${UNINSTALL:-0}
+
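+# 64-bit architectures keep native libraries under lib64 instead of lib.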
+if [ "${ARCH}" != "i386" ]; then
+ LIB_DIR=${LIB_DIR}64
+fi
+
+if [ "${UNINSTALL}" -ne "1" ]; then
+ mkdir -p ${LOG_DIR}
+ chown hdfs:hadoop ${LOG_DIR}
+ chmod 755 ${LOG_DIR}
+
+ if [ ! -d ${PID_DIR} ]; then
+ mkdir -p ${PID_DIR}
+ chown root:hadoop ${PID_DIR}
+ chmod 775 ${PID_DIR}
+ fi
+
+fi
diff --git a/src/test/system/conf/system-test-hdfs.xml b/src/test/system/conf/system-test-hdfs.xml
index 707ab07..4e54062 100644
--- a/src/test/system/conf/system-test-hdfs.xml
+++ b/src/test/system/conf/system-test-hdfs.xml
@@ -118,7 +118,7 @@
<description>
Local file system path on gate way to cluster-controller binary including the binary name.
To build the binary the following commands need to be executed:
- % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_HOME of setup cluster)
+ % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster)
% cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path
Location of the cluster is important security precaution.
The binary should be owned by root and test user group permission should be set such a