#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function hadoop_usage
{
  echo "Usage: hdfs [--config confdir] [--daemon (start|stop|status)] COMMAND"
  echo "       where COMMAND is one of:"
  echo "  balancer             run a cluster balancing utility"
  echo "  cacheadmin           configure the HDFS cache"
  echo "  classpath            prints the class path needed to get the"
  echo "                       Hadoop jar and the required libraries"
  echo "  datanode             run a DFS datanode"
  echo "  dfs                  run a filesystem command on the file system"
  echo "  dfsadmin             run a DFS admin client"
  echo "  fetchdt              fetch a delegation token from the NameNode"
  echo "  fsck                 run a DFS filesystem checking utility"
  echo "  getconf              get config values from configuration"
  echo "  groups               get the groups which users belong to"
  echo "  haadmin              run a DFS HA admin client"
  echo "  jmxget               get JMX exported values from NameNode or DataNode"
  echo "  journalnode          run the DFS journalnode"
  echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
  echo "                       Use -help to see options"
  echo "  namenode             run the DFS namenode"
  echo "                       Use -format to initialize the DFS filesystem"
  echo "  nfs3                 run an NFS version 3 gateway"
  echo "  oev                  apply the offline edits viewer to an edits file"
  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
  echo "  oiv_legacy           apply the offline fsimage viewer to a legacy fsimage"
  echo "  portmap              run a portmap service"
  echo "  secondarynamenode    run the DFS secondary namenode"
  echo "  snapshotDiff         diff two snapshots of a directory or diff the"
  echo "                       current directory contents with a snapshot"
  echo "  zkfc                 run the ZK Failover Controller daemon"
  echo ""
  echo "Most commands print help when invoked w/o parameters."
}
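
# Illustrative invocations, assuming this script is on PATH as "hdfs" and a
# working HDFS configuration is in place:
#   hdfs dfs -ls /                  # run an FsShell command against HDFS
#   hdfs --daemon start namenode    # run the namenode as a daemon
#   hdfs fsck / -files -blocks      # check filesystem health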

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
else
  this="${BASH_SOURCE-$0}"
  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
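
# HADOOP_PREFIX wins over the path derived from this script's location;
# a pre-set HADOOP_LIBEXEC_DIR (below) overrides both.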

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 1>&2
  exit 1
fi
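
# hdfs-config.sh (directly or via the scripts it sources) defines the
# hadoop_* helpers used below: hadoop_exit_with_usage, hadoop_finalize,
# hadoop_add_param, and the daemon handlers.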

if [[ $# = 0 ]]; then
  hadoop_exit_with_usage 1
fi

COMMAND=$1
shift

case ${COMMAND} in
  balancer)
    CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
  ;;
  cacheadmin)
    CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
  ;;
  classpath)
    hadoop_finalize
    echo "${CLASSPATH}"
    exit
  ;;
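  # A secure datanode (HADOOP_SECURE_DN_USER set) is normally started as root
  # via jsvc so it can bind privileged ports, then drops to that user.
  # Illustrative setup:
  #   export HADOOP_SECURE_DN_USER=hdfs
  #   hdfs --daemon start datanode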
  datanode)
    daemon="true"
    # Determine if we're starting a secure datanode, and
    # if so, redefine appropriate variables
    if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
      secure_service="true"
      secure_user="${HADOOP_SECURE_DN_USER}"

      # backward compatibility
      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"

      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS} ${HADOOP_DATANODE_OPTS}"
      CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
    else
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
      CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
    fi
  ;;
  dfs)
    CLASS=org.apache.hadoop.fs.FsShell
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  ;;
  dfsadmin)
    CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  ;;
  fetchdt)
    CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
  ;;
  fsck)
    CLASS=org.apache.hadoop.hdfs.tools.DFSck
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  ;;
  getconf)
    CLASS=org.apache.hadoop.hdfs.tools.GetConf
  ;;
  groups)
    CLASS=org.apache.hadoop.hdfs.tools.GetGroups
  ;;
  haadmin)
    CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
    CLASSPATH="${CLASSPATH}:${TOOL_PATH}"
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  ;;
  jmxget)
    CLASS=org.apache.hadoop.hdfs.tools.JMXGet
  ;;
  journalnode)
    daemon="true"
    CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
  ;;
  lsSnapshottableDir)
    CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
  ;;
  namenode)
    daemon="true"
    CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
  ;;
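  # A privileged NFS3 gateway (HADOOP_PRIVILEGED_NFS_USER set) follows the
  # same pattern as the secure datanode: start privileged, bind the low
  # ports, then drop to the configured user.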
  nfs3)
    daemon="true"
    if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then
      secure_service="true"
      secure_user="${HADOOP_PRIVILEGED_NFS_USER}"

      # backward compatibility
      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"

      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS} ${HADOOP_NFS3_OPTS}"
      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
    else
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
    fi
  ;;
  oev)
    CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
  ;;
  oiv)
    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
  ;;
  oiv_legacy)
    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
  ;;
  portmap)
    daemon="true"
    CLASS=org.apache.hadoop.portmap.Portmap
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
  ;;
  secondarynamenode)
    daemon="true"
    CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
  ;;
  snapshotDiff)
    CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
  ;;
  zkfc)
    daemon="true"
    CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
  ;;
  -*)
    hadoop_exit_with_usage 1
  ;;
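  # Anything else that isn't an option is treated as a fully qualified class
  # name, e.g. (illustrative): hdfs org.apache.hadoop.util.VersionInfo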
  *)
    CLASS="${COMMAND}"
  ;;
esac

if [[ -n "${secure_service}" ]]; then
  HADOOP_SECURE_USER="${secure_user}"
  if hadoop_verify_secure_prereq; then
    hadoop_setup_secure_service
    priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
    priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.err"
    priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
  fi
else
  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
fi
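
# Secure services track two sets of files: priv_* for the privileged
# launcher and daemon_* for the process left running once privileges
# have been dropped to ${HADOOP_SECURE_USER}.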

if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
  # shellcheck disable=SC2034
  HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
  if [[ -n "${secure_service}" ]]; then
    # shellcheck disable=SC2034
    HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
  else
    # shellcheck disable=SC2034
    HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
  fi
fi

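# hadoop_add_param is expected to append -Xmx only if a heap size is not
# already present in HADOOP_OPTS, so user-supplied settings win over
# JAVA_HEAP_MAX; hadoop_finalize then locks in the classpath and JVM options.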
hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize

export CLASSPATH

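# Daemon commands go through the (secure) daemon handlers, which honor
# HADOOP_DAEMON_MODE; everything else runs in the foreground via
# hadoop_java_exec.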
if [[ -n "${daemon}" ]]; then
  if [[ -n "${secure_service}" ]]; then
    hadoop_secure_daemon_handler \
      "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}" \
      "${daemon_pidfile}" "${daemon_outfile}" \
      "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
  else
    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}" \
      "${daemon_pidfile}" "${daemon_outfile}" "$@"
  fi
  exit $?
else
  # shellcheck disable=SC2086
  hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
fi